author		2022-10-21 16:00:35 -0700
committer	2022-10-21 16:00:35 -0700
commit		14e77332e74603efab8347c89d3cda447c3b97c9 (patch)
tree		b7b8a48f4f75590266a763c52e072dda32b228ae /drivers/pci/controller
parent		lib: zstd: clean up double word in comment. (diff)
parent		Merge tag 'for-linus-6.1-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip (diff)
Merge branch 'main' into zstd-next
Diffstat (limited to 'drivers/pci/controller')
60 files changed, 4317 insertions, 2477 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 601f2531ee91..bfd9bac37e24 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -10,6 +10,10 @@ config PCI_MVEBU
 	depends on ARM
 	depends on OF
 	select PCI_BRIDGE_EMUL
+	help
+	  Add support for Marvell EBU PCIe controller. This PCIe controller
+	  is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
+	  Armada XP, Armada 375, Armada 38x and Armada 39x.
 
 config PCI_AARDVARK
 	tristate "Aardvark PCIe controller"
@@ -233,7 +237,7 @@ config PCIE_ROCKCHIP_EP
 
 config PCIE_MEDIATEK
 	tristate "MediaTek PCIe controller"
-	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
 	help
@@ -270,7 +274,7 @@ config VMD
 
 config PCIE_BRCMSTB
 	tristate "Broadcom Brcmstb PCIe host controller"
-	depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || \
+	depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
 		   BMIPS_GENERIC || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
@@ -289,7 +293,7 @@ config PCI_HYPERV_INTERFACE
 config PCI_LOONGSON
 	bool "LOONGSON PCI Controller"
 	depends on MACH_LOONGSON64 || COMPILE_TEST
-	depends on OF
+	depends on OF || ACPI
 	depends on PCI_QUIRKS
 	default MACH_LOONGSON64
 	help
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 489586a4cdc7..a82f845cc4b5 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -69,6 +69,7 @@ struct j721e_pcie_data {
 	enum j721e_pcie_mode mode;
 	unsigned int quirk_retrain_flag:1;
 	unsigned int quirk_detect_quiet_flag:1;
+	unsigned int quirk_disable_flr:1;
 	u32 linkdown_irq_regfield;
 	unsigned int byte_access_allowed:1;
 };
@@ -307,6 +308,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
 static const struct j721e_pcie_data j7200_pcie_ep_data = {
 	.mode = PCI_MODE_EP,
 	.quirk_detect_quiet_flag = true,
+	.quirk_disable_flr = true,
 };
 
 static const struct j721e_pcie_data am64_pcie_rc_data = {
@@ -356,8 +358,8 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	const struct j721e_pcie_data *data;
 	struct cdns_pcie *cdns_pcie;
 	struct j721e_pcie *pcie;
-	struct cdns_pcie_rc *rc;
-	struct cdns_pcie_ep *ep;
+	struct cdns_pcie_rc *rc = NULL;
+	struct cdns_pcie_ep *ep = NULL;
 	struct gpio_desc *gpiod;
 	void __iomem *base;
 	struct clk *clk;
@@ -376,6 +378,47 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	if (!pcie)
 		return -ENOMEM;
 
+	switch (mode) {
+	case PCI_MODE_RC:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+			return -ENODEV;
+
+		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+		if (!bridge)
+			return -ENOMEM;
+
+		if (!data->byte_access_allowed)
+			bridge->ops = &cdns_ti_pcie_host_ops;
+		rc = pci_host_bridge_priv(bridge);
+		rc->quirk_retrain_flag = data->quirk_retrain_flag;
+		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+		cdns_pcie = &rc->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+		break;
+	case PCI_MODE_EP:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+			return -ENODEV;
+
+		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+		if (!ep)
+			return -ENOMEM;
+
+		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+		ep->quirk_disable_flr = data->quirk_disable_flr;
+
+		cdns_pcie = &ep->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+		return 0;
+	}
+
 	pcie->mode = mode;
 	pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
@@ -426,28 +469,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 
 	switch (mode) {
 	case PCI_MODE_RC:
-		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
-			ret = -ENODEV;
-			goto err_get_sync;
-		}
-
-		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
-		if (!bridge) {
-			ret = -ENOMEM;
-			goto err_get_sync;
-		}
-
-		if (!data->byte_access_allowed)
-			bridge->ops = &cdns_ti_pcie_host_ops;
-		rc = pci_host_bridge_priv(bridge);
-		rc->quirk_retrain_flag = data->quirk_retrain_flag;
-		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
-		cdns_pcie = &rc->pcie;
-		cdns_pcie->dev = dev;
-		cdns_pcie->ops = &j721e_pcie_ops;
-		pcie->cdns_pcie = cdns_pcie;
-
 		gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
 		if (IS_ERR(gpiod)) {
 			ret = PTR_ERR(gpiod);
@@ -497,23 +518,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 		break;
 	case PCI_MODE_EP:
-		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
-			ret = -ENODEV;
-			goto err_get_sync;
-		}
-
-		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
-		if (!ep) {
-			ret = -ENOMEM;
-			goto err_get_sync;
-		}
-
-		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
-		cdns_pcie = &ep->pcie;
-		cdns_pcie->dev = dev;
-		cdns_pcie->ops = &j721e_pcie_ops;
-		pcie->cdns_pcie = cdns_pcie;
-
 		ret = cdns_pcie_init_phy(dev, cdns_pcie);
 		if (ret) {
 			dev_err(dev, "Failed to init phy\n");
@@ -525,8 +529,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 			goto err_pcie_setup;
 
 		break;
-	default:
-		dev_err(dev, "INVALID device type %d\n", mode);
 	}
 
 	return 0;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 88e05b9c2e5b..b8b655d4047e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -187,8 +187,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
 	struct cdns_pcie *pcie = &ep->pcie;
 	u32 r;
 
-	r = find_first_zero_bit(&ep->ob_region_map,
-				sizeof(ep->ob_region_map) * BITS_PER_LONG);
+	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
 	if (r >= ep->max_regions - 1) {
 		dev_err(&epc->dev, "no free outbound region\n");
 		return -EINVAL;
@@ -565,7 +564,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
 	struct device *dev = pcie->dev;
-	int ret;
+	int max_epfs = sizeof(epc->function_num_map) * 8;
+	int ret, value, epf;
 
 	/*
 	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
 	 */
 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
 
+	if (ep->quirk_disable_flr) {
+		for (epf = 0; epf < max_epfs; epf++) {
+			if (!(epc->function_num_map & BIT(epf)))
+				continue;
+
+			value = cdns_pcie_ep_fn_readl(pcie, epf,
+					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+					PCI_EXP_DEVCAP);
+			value &= ~PCI_EXP_DEVCAP_FLR;
+			cdns_pcie_ep_fn_writel(pcie, epf,
+					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+					PCI_EXP_DEVCAP, value);
+		}
+	}
+
 	ret = cdns_pcie_start_link(pcie);
 	if (ret) {
 		dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index fb96d37a135c..940c7dd701d6 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -123,6 +123,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
 	return ret;
 }
 
+static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+	u32 val;
+
+	val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
 static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
 {
 	struct cdns_pcie *pcie = &rc->pcie;
@@ -501,6 +509,8 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 	if (rc->quirk_detect_quiet_flag)
 		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
 
+	cdns_pcie_host_enable_ptm_response(pcie);
+
 	ret = cdns_pcie_start_link(pcie);
 	if (ret) {
 		dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 52767f26048f..13c4032ca379 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -243,7 +243,6 @@ err_phy:
 	return ret;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int cdns_pcie_suspend_noirq(struct device *dev)
 {
 	struct cdns_pcie *pcie = dev_get_drvdata(dev);
@@ -266,9 +265,8 @@ static int cdns_pcie_resume_noirq(struct device *dev)
 
 	return 0;
 }
-#endif
 
 const struct dev_pm_ops cdns_pcie_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
-				      cdns_pcie_resume_noirq)
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+				  cdns_pcie_resume_noirq)
 };
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index c8a27b6290ce..190786e47df9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -116,6 +116,10 @@
 #define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
 		(((aperture) - 2) << ((bar) * 8))
 
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL	(CDNS_PCIE_LM_BASE + 0x0da8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN	BIT(17)
+
 /*
  * Endpoint Function Registers (PCI configuration space for endpoint functions)
  */
@@ -123,6 +127,7 @@
 #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
 #define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET	0xc0
 #define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET	0x200
 
 /*
@@ -357,6 +362,7 @@ struct cdns_pcie_epf {
  *        minimize time between read and write
  * @epf: Structure to hold info about endpoint function
  * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
  */
 struct cdns_pcie_ep {
 	struct cdns_pcie pcie;
@@ -372,6 +378,7 @@ struct cdns_pcie_ep {
 	spinlock_t lock;
 	struct cdns_pcie_epf *epf;
 	unsigned int quirk_detect_quiet_flag:1;
+	unsigned int quirk_disable_flr:1;
 };
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dfcdeb432dc8..38462ed11d07 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
 	dra7xx_pcie_enable_msi_interrupts(dra7xx);
 }
 
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {
 	.xlate = pci_irqd_intx_xlate,
 };
 
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned long val;
@@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
 	return 1;
 }
 
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	int ret, i, count, num_ctrls;
@@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct dra7xx_pcie *dra7xx;
+	struct dw_pcie_rp *pp;
 	struct dw_pcie *pci;
-	struct pcie_port *pp;
 	unsigned long reg;
 	u32 bit;
 
@@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct device *dev = pci->dev;
@@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 {
 	int ret;
 	struct dw_pcie *pci = dra7xx->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = pci->dev;
 
 	pp->irq = platform_get_irq(pdev, 1);
@@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
 		return pp->irq;
 
 	/* MSI IRQ is muxed */
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;
 
 	ret = dra7xx_pcie_init_irq_domain(pp);
 	if (ret < 0)
@@ -862,7 +862,6 @@ err_link:
 	return ret;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int dra7xx_pcie_suspend(struct device *dev)
 {
 	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
@@ -919,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
 
 	return 0;
 }
-#endif
 
 static void dra7xx_pcie_shutdown(struct platform_device *pdev)
 {
@@ -940,9 +938,9 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
-				      dra7xx_pcie_resume_noirq)
+	SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+				  dra7xx_pcie_resume_noirq)
 };
 
 static struct platform_driver dra7xx_pcie_driver = {
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 467c8d1cd7e4..ec5611005566 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)
 	return (val & PCIE_ELBI_XMLH_LINKUP);
 }
 
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -258,9 +258,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
 
 	exynos_pcie_assert_core_reset(ep);
 
-	phy_reset(ep->phy);
-	phy_power_on(ep->phy);
 	phy_init(ep->phy);
+	phy_power_on(ep->phy);
 
 	exynos_pcie_deassert_core_reset(ep);
 	exynos_pcie_enable_irq_pulse(ep);
@@ -276,7 +275,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
 				struct platform_device *pdev)
 {
 	struct dw_pcie *pci = &ep->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -292,7 +291,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,
 	}
 
 	pp->ops = &exynos_pcie_host_ops;
-	pp->msi_irq = -ENODEV;
+	pp->msi_irq[0] = -ENODEV;
 
 	ret = dw_pcie_host_init(pp);
 	if (ret) {
@@ -390,7 +389,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+static int exynos_pcie_suspend_noirq(struct device *dev)
 {
 	struct exynos_pcie *ep = dev_get_drvdata(dev);
 
@@ -402,11 +401,11 @@ static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
 	return 0;
 }
 
-static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+static int exynos_pcie_resume_noirq(struct device *dev)
 {
 	struct exynos_pcie *ep = dev_get_drvdata(dev);
 	struct dw_pcie *pci = &ep->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	int ret;
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
@@ -421,8 +420,8 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
 }
 
 static const struct dev_pm_ops exynos_pcie_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
-				      exynos_pcie_resume_noirq)
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+				  exynos_pcie_resume_noirq)
 };
 
 static const struct of_device_id exynos_pcie_of_match[] = {
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6974bd5aa116..2616585ca5f8 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -51,6 +51,7 @@ enum imx6_pcie_variants {
 	IMX7D,
 	IMX8MQ,
 	IMX8MM,
+	IMX8MP,
 };
 
 #define IMX6_PCIE_FLAG_IMX6_PHY	BIT(0)
@@ -61,12 +62,14 @@ struct imx6_pcie_drvdata {
 	enum imx6_pcie_variants variant;
 	u32 flags;
 	int dbi_length;
+	const char *gpr;
 };
 
 struct imx6_pcie {
 	struct dw_pcie *pci;
 	int reset_gpio;
 	bool gpio_active_high;
+	bool link_is_up;
 	struct clk *pcie_bus;
 	struct clk *pcie_phy;
 	struct clk *pcie_inbound_axi;
@@ -146,6 +149,32 @@ struct imx6_pcie {
 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN	BIT(5)
 #define PHY_RX_OVRD_IN_LO_RX_PLL_EN	BIT(3)
 
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+		imx6_pcie->drvdata->variant != IMX8MM &&
+		imx6_pcie->drvdata->variant != IMX8MP);
+	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+	unsigned int mask, val;
+
+	if (imx6_pcie->drvdata->variant == IMX8MQ &&
+	    imx6_pcie->controller_id == 1) {
+		mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+		val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+				 PCI_EXP_TYPE_ROOT_PORT);
+	} else {
+		mask = IMX6Q_GPR12_DEVICE_TYPE;
+		val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+				 PCI_EXP_TYPE_ROOT_PORT);
+	}
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
 static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
@@ -271,6 +300,135 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
 	return 0;
 }
 
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MM:
+	case IMX8MP:
+		/*
+		 * The PHY initialization had been done in the PHY
+		 * driver, break here directly.
+		 */
+		break;
+	case IMX8MQ:
+		/*
+		 * TODO: Currently this code assumes external
+		 * oscillator is being used
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   imx6_pcie_grp_offset(imx6_pcie),
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
+		/*
+		 * Regarding the datasheet, the PCIE_VPH is suggested
+		 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
+		 * VREG_BYPASS should be cleared to zero.
+		 */
+		if (imx6_pcie->vph &&
+		    regulator_get_voltage(imx6_pcie->vph) > 3000000)
+			regmap_update_bits(imx6_pcie->iomuxc_gpr,
+					   imx6_pcie_grp_offset(imx6_pcie),
+					   IMX8MQ_GPR_PCIE_VREG_BYPASS,
+					   0);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+		fallthrough;
+	default:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+		/* configure constant input signal to the pcie ctrl and phy */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+				   imx6_pcie->tx_deemph_gen1 << 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+				   imx6_pcie->tx_deemph_gen2_6db << 12);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_FULL,
+				   imx6_pcie->tx_swing_full << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_LOW,
+				   imx6_pcie->tx_swing_low << 25);
+		break;
+	}
+
+	imx6_pcie_configure_type(imx6_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	struct device *dev = imx6_pcie->pci->dev;
+
+	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+				     IOMUXC_GPR22, val,
+				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+				     PHY_PLL_LOCK_WAIT_TIMEOUT))
+		dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+	int mult, div;
+	u16 val;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+		return 0;
+
+	switch (phy_rate) {
+	case 125000000:
+		/*
+		 * The default settings of the MPLL are for a 125MHz input
+		 * clock, so no need to reconfigure anything in that case.
+		 */
+		return 0;
+	case 100000000:
+		mult = 25;
+		div = 0;
+		break;
+	case 200000000:
+		mult = 25;
+		div = 1;
+		break;
+	default:
+		dev_err(imx6_pcie->pci->dev,
+			"Unsupported PHY reference clock rate %lu\n", phy_rate);
+		return -EINVAL;
+	}
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+	val |= PCIE_PHY_ATEOVRD_EN;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+	return 0;
+}
+
 static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
 {
 	u16 tmp;
@@ -367,56 +525,6 @@ static int imx6_pcie_attach_pd(struct device *dev)
 	return 0;
 }
 
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
-{
-	struct device *dev = imx6_pcie->pci->dev;
-
-	switch (imx6_pcie->drvdata->variant) {
-	case IMX7D:
-	case IMX8MQ:
-		reset_control_assert(imx6_pcie->pciephy_reset);
-		fallthrough;
-	case IMX8MM:
-		reset_control_assert(imx6_pcie->apps_reset);
-		break;
-	case IMX6SX:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
-				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
-		/* Force PCIe PHY reset */
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
-				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
-				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
-		break;
-	case IMX6QP:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-				   IMX6Q_GPR1_PCIE_SW_RST,
-				   IMX6Q_GPR1_PCIE_SW_RST);
-		break;
-	case IMX6Q:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
-		break;
-	}
-
-	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
-		int ret = regulator_disable(imx6_pcie->vpcie);
-
-		if (ret)
-			dev_err(dev, "failed to disable vpcie regulator: %d\n",
-				ret);
-	}
-}
-
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
-{
-	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
-		imx6_pcie->drvdata->variant != IMX8MM);
-	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
-}
-
 static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
@@ -453,11 +561,8 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 	case IMX7D:
 		break;
 	case IMX8MM:
-		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
-		if (ret)
-			dev_err(dev, "unable to enable pcie_aux clock\n");
-		break;
 	case IMX8MQ:
+	case IMX8MP:
 		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
 		if (ret) {
 			dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -481,38 +586,45 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 	return ret;
 }
 
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
 {
-	u32 val;
-	struct device *dev = imx6_pcie->pci->dev;
-
-	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
-				     IOMUXC_GPR22, val,
-				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
-				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
-				     PHY_PLL_LOCK_WAIT_TIMEOUT))
-		dev_err(dev, "PCIe PLL lock timeout\n");
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+		break;
+	case IMX6QP:
+	case IMX6Q:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD,
+				   IMX6Q_GPR1_PCIE_TEST_PD);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+		break;
+	case IMX8MM:
+	case IMX8MQ:
+	case IMX8MP:
+		clk_disable_unprepare(imx6_pcie->pcie_aux);
+		break;
+	default:
+		break;
+	}
 }
 
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
 {
 	struct dw_pcie *pci = imx6_pcie->pci;
 	struct device *dev = pci->dev;
 	int ret;
 
-	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
-		ret = regulator_enable(imx6_pcie->vpcie);
-		if (ret) {
-			dev_err(dev, "failed to enable vpcie regulator: %d\n",
-				ret);
-			return;
-		}
-	}
-
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
 		dev_err(dev, "unable to enable pcie_phy clock\n");
-		goto err_pcie_phy;
+		return ret;
 	}
 
 	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -533,34 +645,76 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 		goto err_ref_clk;
 	}
 
+	/* allow the clocks to stabilize */
+	usleep_range(200, 500);
+	return 0;
+
+err_ref_clk:
+	clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+
+	return ret;
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+	imx6_pcie_disable_ref_clk(imx6_pcie);
+	clk_disable_unprepare(imx6_pcie->pcie);
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
 	switch (imx6_pcie->drvdata->variant) {
+	case IMX7D:
+	case IMX8MQ:
+		reset_control_assert(imx6_pcie->pciephy_reset);
+		fallthrough;
 	case IMX8MM:
-		if (phy_power_on(imx6_pcie->phy))
-			dev_err(dev, "unable to power on PHY\n");
+	case IMX8MP:
+		reset_control_assert(imx6_pcie->apps_reset);
 		break;
-	default:
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+		/* Force PCIe PHY reset */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST,
+				   IMX6Q_GPR1_PCIE_SW_RST);
+		break;
+	case IMX6Q:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
 		break;
 	}
 
-	/* allow the clocks to stabilize */
-	usleep_range(200, 500);
-
 	/* Some boards don't have PCIe reset GPIO. */
-	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+	if (gpio_is_valid(imx6_pcie->reset_gpio))
 		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
 					imx6_pcie->gpio_active_high);
-		msleep(100);
-		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
-					!imx6_pcie->gpio_active_high);
-	}
+}
+
+static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
 
 	switch (imx6_pcie->drvdata->variant) {
 	case IMX8MQ:
 		reset_control_deassert(imx6_pcie->pciephy_reset);
 		break;
-	case IMX8MM:
-		if (phy_init(imx6_pcie->phy))
-			dev_err(dev, "waiting for phy ready timeout!\n");
-		break;
 	case IMX7D:
 		reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -596,156 +750,20 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
 		usleep_range(200, 500);
 		break;
 	case IMX6Q:		/* Nothing to do */
-		break;
-	}
-
-	return;
-
-err_ref_clk:
-	clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
-	clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
-	clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
-	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
-		ret = regulator_disable(imx6_pcie->vpcie);
-		if (ret)
-			dev_err(dev, "failed to disable vpcie regulator: %d\n",
-				ret);
-	}
-}
-
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
-{
-	unsigned int mask, val;
-
-	if (imx6_pcie->drvdata->variant == IMX8MQ &&
-	    imx6_pcie->controller_id == 1) {
-		mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
-		val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
-				 PCI_EXP_TYPE_ROOT_PORT);
-	} else {
-		mask = IMX6Q_GPR12_DEVICE_TYPE;
-		val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
-				 PCI_EXP_TYPE_ROOT_PORT);
-	}
-
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
-}
-
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
-	switch (imx6_pcie->drvdata->variant) {
 	case IMX8MM:
-		/*
-		 * The PHY initialization had been done in the PHY
-		 * driver, break here directly.
-		 */
-		break;
-	case IMX8MQ:
-		/*
-		 * TODO: Currently this code assumes external
-		 * oscillator is being used
-		 */
-		regmap_update_bits(imx6_pcie->iomuxc_gpr,
-				   imx6_pcie_grp_offset(imx6_pcie),
-				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
-				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
-		/*
-		 * Regarding the datasheet, the PCIE_VPH is suggested
-		 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
-		 * VREG_BYPASS should be cleared to zero.
-		 */
-		if (imx6_pcie->vph &&
-		    regulator_get_voltage(imx6_pcie->vph) > 3000000)
-			regmap_update_bits(imx6_pcie->iomuxc_gpr,
-					   imx6_pcie_grp_offset(imx6_pcie),
-					   IMX8MQ_GPR_PCIE_VREG_BYPASS,
-					   0);
-		break;
-	case IMX7D:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-		break;
-	case IMX6SX:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
-				   IMX6SX_GPR12_PCIE_RX_EQ_2);
-		fallthrough;
-	default:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
-
-		/* configure constant input signal to the pcie ctrl and phy */
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
-				   imx6_pcie->tx_deemph_gen1 << 0);
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
-				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
-				   imx6_pcie->tx_deemph_gen2_6db << 12);
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-				   IMX6Q_GPR8_TX_SWING_FULL,
-				   imx6_pcie->tx_swing_full << 18);
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
-				   IMX6Q_GPR8_TX_SWING_LOW,
-				   imx6_pcie->tx_swing_low << 25);
+	case IMX8MP:
 		break;
 	}
 
-	imx6_pcie_configure_type(imx6_pcie);
-}
-
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
-{
-	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
-	int mult, div;
-	u16 val;
-
-	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
-		return 0;
-
-	switch (phy_rate) {
-	case 125000000:
-		/*
-		 * The default settings of the MPLL are for a 125MHz input
-		 * clock, so no need to reconfigure anything in that case.
-		 */
-		return 0;
-	case 100000000:
-		mult = 25;
-		div = 0;
-		break;
-	case 200000000:
-		mult = 25;
-		div = 1;
-		break;
-	default:
-		dev_err(imx6_pcie->pci->dev,
-			"Unsupported PHY reference clock rate %lu\n", phy_rate);
-		return -EINVAL;
+	/* Some boards don't have PCIe reset GPIO. */
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		msleep(100);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					!imx6_pcie->gpio_active_high);
+		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+		msleep(100);
 	}
 
-	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
-	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
-		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
-	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
-	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
-	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
-
-	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
-	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
-		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
-	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
-	val |= PCIE_PHY_ATEOVRD_EN;
-	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
-
 	return 0;
 }
 
@@ -783,11 +801,32 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
 	case IMX7D:
 	case IMX8MQ:
 	case IMX8MM:
+	case IMX8MP:
 		reset_control_deassert(imx6_pcie->apps_reset);
 		break;
 	}
 }
 
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6Q:
+	case IMX6SX:
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0);
+		break;
+	case IMX7D:
+	case IMX8MQ:
+	case IMX8MM:
+	case IMX8MP:
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
+	}
+}
+
 static int imx6_pcie_start_link(struct dw_pcie *pci)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
@@ -801,10 +840,12 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 	 * started in Gen2 mode, there is a possibility the devices on the
 	 * bus will not be detected at all.  This happens with PCIe switches.
 	 */
+	dw_pcie_dbi_ro_wr_en(pci);
 	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 	tmp &= ~PCI_EXP_LNKCAP_SLS;
 	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
 	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+	dw_pcie_dbi_ro_wr_dis(pci);
 
 	/* Start LTSSM. */
 	imx6_pcie_ltssm_enable(dev);
@@ -813,11 +854,12 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 	if (ret)
 		goto err_reset_phy;
 
-	if (pci->link_gen == 2) {
-		/* Allow Gen2 mode after the link is up. */
+	if (pci->link_gen > 1) {
+		/* Allow faster modes after the link is up */
+		dw_pcie_dbi_ro_wr_en(pci);
 		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 		tmp &= ~PCI_EXP_LNKCAP_SLS;
-		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+		tmp |= pci->link_gen;
 		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
 
 		/*
@@ -827,6 +869,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 		tmp |= PORT_LOGIC_SPEED_CHANGE;
 		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+		dw_pcie_dbi_ro_wr_dis(pci);
 
 		if (imx6_pcie->drvdata->flags &
 		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
@@ -848,37 +891,109 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 		/* Make sure link training is finished as well! */
 		ret = dw_pcie_wait_for_link(pci);
-		if (ret) {
-			dev_err(dev, "Failed to bring link up!\n");
+		if (ret)
 			goto err_reset_phy;
-		}
 	} else {
-		dev_info(dev, "Link: Gen2 disabled\n");
+		dev_info(dev, "Link: Only Gen1 is enabled\n");
 	}
 
+	imx6_pcie->link_is_up = true;
 	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
 	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
 	return 0;
 
 err_reset_phy:
+	imx6_pcie->link_is_up = false;
 	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
 		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
 		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
 	imx6_pcie_reset_phy(imx6_pcie);
-	return ret;
+	return 0;
 }
 
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static void imx6_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct device *dev = pci->dev;
+
+	/* Turn off PCIe LTSSM */
+	imx6_pcie_ltssm_disable(dev);
+}
+
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+	int ret;
+
+	if (imx6_pcie->vpcie) {
+		ret = regulator_enable(imx6_pcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable vpcie regulator: %d\n",
+				ret);
+			return ret;
+		}
+	}
 
 	imx6_pcie_assert_core_reset(imx6_pcie);
 	imx6_pcie_init_phy(imx6_pcie);
-	imx6_pcie_deassert_core_reset(imx6_pcie);
+
+	ret = imx6_pcie_clk_enable(imx6_pcie);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+		goto err_reg_disable;
+	}
+
+	if (imx6_pcie->phy) {
+		ret = phy_init(imx6_pcie->phy);
+		if (ret) {
+			dev_err(dev, "pcie PHY power up failed\n");
+			goto err_clk_disable;
+		}
+	}
+
+	ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+	if (ret < 0) {
+		dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+		goto err_phy_off;
+	}
+
+	if (imx6_pcie->phy) {
+		ret = phy_power_on(imx6_pcie->phy);
+		if (ret) {
+			dev_err(dev, "waiting for PHY ready timeout!\n");
+			goto err_phy_off;
+		}
+	}
 	imx6_setup_phy_mpll(imx6_pcie);
 
 	return 0;
+
+err_phy_off:
+	if (imx6_pcie->phy)
+		phy_exit(imx6_pcie->phy);
+err_clk_disable:
+	imx6_pcie_clk_disable(imx6_pcie);
+err_reg_disable:
+	if (imx6_pcie->vpcie)
+		regulator_disable(imx6_pcie->vpcie);
+	return ret;
+}
+
+static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+	if (imx6_pcie->phy) {
+		if (phy_power_off(imx6_pcie->phy))
+			dev_err(pci->dev, "unable to power off PHY\n");
+		phy_exit(imx6_pcie->phy);
+	}
+	imx6_pcie_clk_disable(imx6_pcie);
+
+	if (imx6_pcie->vpcie)
+		regulator_disable(imx6_pcie->vpcie);
 }
 
 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
@@ -889,26 +1004,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 	.start_link = imx6_pcie_start_link,
 };
 
-#ifdef CONFIG_PM_SLEEP
-static void imx6_pcie_ltssm_disable(struct device *dev)
-{
-	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
-	switch (imx6_pcie->drvdata->variant) {
-	case IMX6SX:
-	case IMX6QP:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX6Q_GPR12_PCIE_CTL_2, 0);
-		break;
-	case IMX7D:
-	case IMX8MM:
-		reset_control_assert(imx6_pcie->apps_reset);
-		break;
-	default:
-		dev_err(dev, "ltssm_disable not supported\n");
-	}
-}
-
 static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
 {
 	struct device *dev = imx6_pcie->pci->dev;
@@ -923,6 +1018,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
 	/* Others poke directly at IOMUXC registers */
 	switch (imx6_pcie->drvdata->variant) {
 	case IMX6SX:
+	case IMX6QP:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_PM_TURN_OFF,
 				   IMX6SX_GPR12_PCIE_PM_TURN_OFF);
@@ -945,48 +1041,17 @@ pm_turnoff_sleep:
 	usleep_range(1000, 10000);
 }
 
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
-{
-	clk_disable_unprepare(imx6_pcie->pcie);
-	clk_disable_unprepare(imx6_pcie->pcie_phy);
-	clk_disable_unprepare(imx6_pcie->pcie_bus);
-
-	switch (imx6_pcie->drvdata->variant) {
-	case IMX6SX:
-		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
-		break;
-	case IMX7D:
-		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
-				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
-		break;
-	case IMX8MQ:
-	case IMX8MM:
-		clk_disable_unprepare(imx6_pcie->pcie_aux);
-		break;
-	default:
-		break;
-	}
-}
-
 static int imx6_pcie_suspend_noirq(struct device *dev)
 {
 	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
 
 	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
 		return 0;
 
 	imx6_pcie_pm_turnoff(imx6_pcie);
-	imx6_pcie_ltssm_disable(dev);
-	imx6_pcie_clk_disable(imx6_pcie);
-
-	switch (imx6_pcie->drvdata->variant) {
-	case IMX8MM:
-		if (phy_power_off(imx6_pcie->phy))
-			dev_err(dev, "unable to power off PHY\n");
-		break;
-	default:
-		break;
-	}
+	imx6_pcie_stop_link(imx6_pcie->pci);
+	imx6_pcie_host_exit(pp);
 
 	return 0;
 }
@@ -995,27 +1060,25 @@ static int imx6_pcie_resume_noirq(struct device *dev)
 {
 	int ret;
 	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-	struct pcie_port *pp = &imx6_pcie->pci->pp;
+	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
 
 	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
 		return 0;
 
-	imx6_pcie_assert_core_reset(imx6_pcie);
-	imx6_pcie_init_phy(imx6_pcie);
-	imx6_pcie_deassert_core_reset(imx6_pcie);
+	ret = imx6_pcie_host_init(pp);
+	if (ret)
+		return ret;
 	dw_pcie_setup_rc(pp);
 
-	ret = imx6_pcie_start_link(imx6_pcie->pci);
-	if (ret < 0)
-		dev_info(dev, "pcie link is down after resume.\n");
+	if (imx6_pcie->link_is_up)
+		imx6_pcie_start_link(imx6_pcie->pci);
 
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops imx6_pcie_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
-				      imx6_pcie_resume_noirq)
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+				  imx6_pcie_resume_noirq)
 };
 
 static int imx6_pcie_probe(struct platform_device *pdev)
@@ -1126,6 +1189,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 		}
 		break;
 	case IMX8MM:
+	case IMX8MP:
 		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
 		if (IS_ERR(imx6_pcie->pcie_aux))
 			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1163,7 +1227,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 
 	/* Grab GPR config register range */
 	imx6_pcie->iomuxc_gpr =
-		syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+		syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
 	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
 		dev_err(dev, "unable to find iomuxc registers\n");
 		return PTR_ERR(imx6_pcie->iomuxc_gpr);
@@ -1242,29 +1306,41 @@ static const struct imx6_pcie_drvdata drvdata[] = {
 		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
 			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
 		.dbi_length = 0x200,
+		.gpr = "fsl,imx6q-iomuxc-gpr",
 	},
 	[IMX6SX] = {
 		.variant = IMX6SX,
 		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
 			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
 			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+		.gpr = "fsl,imx6q-iomuxc-gpr",
 	},
 	[IMX6QP] = {
 		.variant = IMX6QP,
 		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
-			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
 		.dbi_length = 0x200,
+		.gpr = "fsl,imx6q-iomuxc-gpr",
 	},
 	[IMX7D] = {
 		.variant = IMX7D,
 		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+		.gpr = "fsl,imx7d-iomuxc-gpr",
 	},
 	[IMX8MQ] = {
 		.variant = IMX8MQ,
+		.gpr = "fsl,imx8mq-iomuxc-gpr",
 	},
 	[IMX8MM] = {
 		.variant = IMX8MM,
 		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+		.gpr = "fsl,imx8mm-iomuxc-gpr",
+	},
+	[IMX8MP] = {
+		.variant = IMX8MP,
+		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+		.gpr = "fsl,imx8mp-iomuxc-gpr",
 	},
 };
 
@@ -1275,6 +1351,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
 	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
 	{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+	{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
 	{},
 };
 
@@ -1293,7 +1370,7 @@ static struct platform_driver imx6_pcie_driver = {
 static void imx6_pcie_quirk(struct pci_dev *dev)
 {
 	struct pci_bus *bus = dev->bus;
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 
 	/* Bus parent is the PCI bridge, its parent is this platform driver */
 	if (!bus->dev.parent || !bus->dev.parent->parent)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 1c2ee4e13f1c..78818853af9e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -109,7 +109,7 @@ struct ks_pcie_of_data {
 	enum dw_pcie_device_mode mode;
 	const struct dw_pcie_host_ops *host_ops;
 	const struct dw_pcie_ep_ops *ep_ops;
-	unsigned int version;
+	u32 version;
 };
 
 struct keystone_pcie {
@@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
 
 static void ks_pcie_msi_irq_ack(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)
 
 static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	struct dw_pcie *pci;
 	u64 msi_target;
@@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
 
 static void ks_pcie_msi_mask(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)
 
 static void ks_pcie_msi_unmask(struct irq_data *data)
 {
-	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
 	struct keystone_pcie *ks_pcie;
 	u32 irq = data->hwirq;
 	struct dw_pcie *pci;
@@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
 	.irq_unmask = ks_pcie_msi_unmask,
 };
 
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
 {
 	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
 	return dw_pcie_allocate_domains(pp);
@@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 	u32 val;
 	u32 num_viewport = ks_pcie->num_viewport;
 	struct dw_pcie *pci = ks_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	u64 start, end;
 	struct resource *mem;
 	int i;
@@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
 					   unsigned int devfn, int where)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 	u32 reg;
@@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {
  */
 static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
@@ -531,13 +531,13 @@ static void ks_pcie_quirk(struct pci_dev *dev)
 	struct pci_dev *bridge;
 	static const struct pci_device_id rc_pci_devids[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
-		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
-		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
-		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
-		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		 .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ 0, },
 	};
 
@@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irq;
 	struct dw_pcie *pci = ks_pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = pci->dev;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 vector, reg, pos;
@@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
 	return 0;
 }
 
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,
 
 static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
 	.host_ops = &ks_pcie_host_ops,
-	.version = 0x365A,
+	.version = DW_PCIE_VER_365A,
 };
 
 static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
 	.host_ops = &ks_pcie_am654_host_ops,
 	.mode = DW_PCIE_RC_TYPE,
-	.version = 0x490A,
+	.version = DW_PCIE_VER_490A,
 };
 
 static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
 	.ep_ops = &ks_pcie_am654_ep_ops,
 	.mode = DW_PCIE_EP_TYPE,
-	.version = 0x490A,
+	.version = DW_PCIE_VER_490A,
 };
 
 static const struct of_device_id ks_pcie_of_match[] = {
@@ -1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 	struct device_link **link;
 	struct gpio_desc *gpiod;
 	struct resource *res;
-	unsigned int version;
 	void __iomem *base;
 	u32 num_viewport;
 	struct phy **phy;
 	u32 num_lanes;
 	char name[10];
+	u32 version;
 	int ret;
 	int irq;
 	int i;
@@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		goto err_get_sync;
 	}
 
-	if (pci->version >= 0x480A)
+	if (dw_pcie_ver_is_ge(pci, 480A))
 		ret = ks_pcie_am654_set_mode(dev, mode);
 	else
 		ret = ks_pcie_set_mode(dev);
@@ -1324,7 +1324,7 @@ static struct platform_driver ks_pcie_driver __refdata = {
 	.remove = __exit_p(ks_pcie_remove),
 	.driver = {
"keystone-pcie", - .of_match_table = of_match_ptr(ks_pcie_of_match), + .of_match_table = ks_pcie_of_match, }, }; builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 39f4664bd84c..ad99707b3b99 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -32,15 +32,6 @@ struct ls_pcie_ep { const struct ls_pcie_ep_drvdata *drvdata; }; -static int ls_pcie_establish_link(struct dw_pcie *pci) -{ - return 0; -} - -static const struct dw_pcie_ops dw_ls_pcie_ep_ops = { - .start_link = ls_pcie_establish_link, -}; - static const struct pci_epc_features* ls_pcie_ep_get_features(struct dw_pcie_ep *ep) { @@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = { static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = { .ops = &ls_pcie_ep_ops, - .dw_pcie_ops = &dw_ls_pcie_ep_ops, }; static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = { .func_offset = 0x20000, .ops = &ls_pcie_ep_ops, - .dw_pcie_ops = &dw_ls_pcie_ep_ops, }; static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = { .func_offset = 0x8000, .ops = &ls_pcie_ep_ops, - .dw_pcie_ops = &dw_ls_pcie_ep_ops, }; static const struct of_device_id ls_pcie_ep_of_match[] = { diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 6a4f0619bb1c..879b8692f96a 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie) iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); } -static int ls_pcie_host_init(struct pcie_port *pp) +static int ls_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct ls_pcie *pcie = to_ls_pcie(pci); diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 686ded034f22..c1527693bed9 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -313,14 +313,14 @@ static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, * cannot program the PCI_CLASS_DEVICE register, so we must fabricate * the return value in the config accessors. 
 	 */
-	if (where == PCI_CLASS_REVISION && size == 4)
-		*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
-	else if (where == PCI_CLASS_DEVICE && size == 2)
-		*val = PCI_CLASS_BRIDGE_PCI;
-	else if (where == PCI_CLASS_DEVICE && size == 1)
-		*val = PCI_CLASS_BRIDGE_PCI & 0xff;
-	else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
-		*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+	if ((where & ~3) == PCI_CLASS_REVISION) {
+		if (size <= 2)
+			*val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+		*val &= ~0xffffff00;
+		*val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+		if (size <= 2)
+			*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+	}
 
 	return PCIBIOS_SUCCESSFUL;
 }
@@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
 	return 0;
 }
 
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct meson_pcie *mp = to_meson_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index e8afa50129a8..b8cb77c9c4bd 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
 static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
 					       unsigned int devfn, int where)
 {
-	struct pcie_port *pp = bus->sysdata;
+	struct dw_pcie_rp *pp = bus->sysdata;
 	struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
 	unsigned int busnr = bus->number;
 	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
@@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {
 static void al_pcie_config_prepare(struct al_pcie *pcie)
 {
 	struct al_pcie_target_bus_cfg *target_bus_cfg;
-	struct pcie_port *pp = &pcie->pci->pp;
+	struct dw_pcie_rp *pp = &pcie->pci->pp;
 	unsigned int ecam_bus_mask;
 	u32 cfg_control_offset;
 	u8 subordinate_bus;
@@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
 	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
 }
 
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct al_pcie *pcie = to_al_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 4e2552dcf982..dc469ef8e99b 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)
 	return 0;
 }
 
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	u32 reg;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
 				  struct platform_device *pdev)
 {
 	struct dw_pcie *pci = pcie->pci;
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct device *dev = &pdev->dev;
 	int ret;
 
@@ -343,7 +343,7 @@ static struct platform_driver armada8k_pcie_driver = {
 	.probe = armada8k_pcie_probe,
 	.driver = {
 		.name = "armada8k-pcie",
-		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+		.of_match_table = armada8k_pcie_of_match,
 		.suppress_bind_attrs = true,
 	},
 };
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 2f15441770e1..98102079e26d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
 static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
 {
 	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
-	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_rp *pp = &pci->pp;
 	struct dw_pcie_ep *ep = &pci->ep;
 
 	switch (artpec6_pcie->mode) {
@@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
 	usleep_range(100, 200);
 }
 
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0eda8236c125..83ddb190292e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -154,22 +154,25 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 	return 0;
 }
 
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
-				  enum pci_barno bar, dma_addr_t cpu_addr,
-				  enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+				  dma_addr_t cpu_addr, enum pci_barno bar)
 {
 	int ret;
 	u32 free_win;
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
-	free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+	if (!ep->bar_to_atu[bar])
+		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+	else
+		free_win = ep->bar_to_atu[bar];
+
 	if (free_win >= pci->num_ib_windows) {
 		dev_err(pci->dev, "No free inbound window\n");
 		return -EINVAL;
 	}
 
-	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
-				       as_type);
+	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+				       cpu_addr, bar);
 	if (ret < 0) {
 		dev_err(pci->dev, "Failed to program IB window\n");
 		return ret;
@@ -185,8 +188,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 				   phys_addr_t phys_addr, u64 pci_addr,
 				   size_t size)
 {
-	u32 free_win;
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 free_win;
+	int ret;
 
 	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
 	if (free_win >= pci->num_ob_windows) {
@@ -194,8 +198,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
 		return -EINVAL;
 	}
 
-	dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
-				     phys_addr, pci_addr, size);
+	ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+					   phys_addr, pci_addr, size);
+	if (ret)
+		return ret;
 
 	set_bit(free_win, ep->ob_window_map);
 	ep->outbound_addr[free_win] = phys_addr;
@@ -213,38 +219,40 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 
 	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
 
-	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
 	clear_bit(atu_index, ep->ib_window_map);
 	ep->epf_bar[bar] = NULL;
+	ep->bar_to_atu[bar] = 0;
 }
 
 static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 			      struct pci_epf_bar *epf_bar)
 {
-	int ret;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	enum pci_barno bar = epf_bar->barno;
 	size_t size = epf_bar->size;
 	int flags = epf_bar->flags;
-	enum dw_pcie_as_type as_type;
-	u32 reg;
 	unsigned int func_offset = 0;
+	int ret, type;
+	u32 reg;
 
dw_pcie_ep_func_select(ep, func_no); reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset; if (!(flags & PCI_BASE_ADDRESS_SPACE)) - as_type = DW_PCIE_AS_MEM; + type = PCIE_ATU_TYPE_MEM; else - as_type = DW_PCIE_AS_IO; + type = PCIE_ATU_TYPE_IO; - ret = dw_pcie_ep_inbound_atu(ep, func_no, bar, - epf_bar->phys_addr, as_type); + ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar); if (ret) return ret; + if (ep->epf_bar[bar]) + return 0; + dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); @@ -289,7 +297,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, if (ret < 0) return; - dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); + dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index); clear_bit(atu_index, ep->ob_window_map); } @@ -435,8 +443,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc) struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - if (pci->ops && pci->ops->stop_link) - pci->ops->stop_link(pci); + dw_pcie_stop_link(pci); } static int dw_pcie_ep_start(struct pci_epc *epc) @@ -444,10 +451,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc) struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - if (!pci->ops || !pci->ops->start_link) - return -EINVAL; - - return pci->ops->start_link(pci); + return dw_pcie_start_link(pci); } static const struct pci_epc_features* @@ -699,17 +703,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (!pci->dbi_base2) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - if (!res) + if (!res) { pci->dbi_base2 = pci->dbi_base + SZ_4K; - else { + } else { pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); } } - dw_pcie_iatu_detect(pci); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); if (!res) return -EINVAL; @@ -717,17 +719,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->phys_base = res->start; ep->addr_size = resource_size(res); - ep->ib_window_map = devm_kcalloc(dev, - BITS_TO_LONGS(pci->num_ib_windows), - sizeof(long), - GFP_KERNEL); + dw_pcie_version_detect(pci); + + dw_pcie_iatu_detect(pci); + + ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, + GFP_KERNEL); if (!ep->ib_window_map) return -ENOMEM; - ep->ob_window_map = devm_kcalloc(dev, - BITS_TO_LONGS(pci->num_ob_windows), - sizeof(long), - GFP_KERNEL); + ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows, + GFP_KERNEL); if (!ep->ob_window_map) return -ENOMEM; @@ -780,8 +782,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, epc->mem->window.page_size); if (!ep->msi_mem) { + ret = -ENOMEM; dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); - return -ENOMEM; + goto err_exit_epc_mem; } if (ep->ops->get_features) { @@ -790,6 +793,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return 0; } - return dw_pcie_ep_init_complete(ep); + ret = dw_pcie_ep_init_complete(ep); + if (ret) + goto err_free_epc_mem; + + return 0; + +err_free_epc_mem: + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, + epc->mem->window.page_size); + +err_exit_epc_mem: + pci_epc_mem_exit(epc); + + return ret; } EXPORT_SYMBOL_GPL(dw_pcie_ep_init); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index f4755f3a03be..39f3b37d4033 100644 --- 
a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = { }; /* MSI int handler */ -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) { int i, pos; unsigned long val; @@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) static void dw_chained_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); - struct pcie_port *pp; + struct dw_pcie_rp *pp; chained_irq_enter(chip, desc); @@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc) static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target; @@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d, static void dw_pci_bottom_mask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; unsigned long flags; @@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d) static void dw_pci_bottom_unmask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; unsigned long flags; @@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d) static void dw_pci_bottom_ack(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); unsigned int res, bit, ctrl; @@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { - struct pcie_port *pp = domain->host_data; + struct dw_pcie_rp *pp = domain->host_data; unsigned long flags; u32 i; int bit; @@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct pcie_port *pp = domain->host_data; + struct dw_pcie_rp *pp = domain->host_data; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); @@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = { .free = dw_pcie_irq_domain_free, }; -int dw_pcie_allocate_domains(struct pcie_port *pp) +int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); @@ -255,24 +255,21 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) return 0; } -static void dw_pcie_free_msi(struct pcie_port *pp) +static void dw_pcie_free_msi(struct dw_pcie_rp *pp) { - if (pp->msi_irq) - irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL); + u32 ctrl; + + for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { + if (pp->msi_irq[ctrl] > 0) + irq_set_chained_handler_and_data(pp->msi_irq[ctrl], + NULL, NULL); + } irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); - - if (pp->msi_data) { - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - - dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg), - DMA_FROM_DEVICE, 
DMA_ATTR_SKIP_CPU_SYNC); - } } -static void dw_pcie_msi_init(struct pcie_port *pp) +static void dw_pcie_msi_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target = (u64)pp->msi_data; @@ -285,7 +282,107 @@ static void dw_pcie_msi_init(struct pcie_port *pp) dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target)); } -int dw_pcie_host_init(struct pcie_port *pp) +static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct platform_device *pdev = to_platform_device(dev); + u32 ctrl, max_vectors; + int irq; + + /* Parse any "msiX" IRQs described in the devicetree */ + for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { + char msi_name[] = "msiX"; + + msi_name[3] = '0' + ctrl; + irq = platform_get_irq_byname_optional(pdev, msi_name); + if (irq == -ENXIO) + break; + if (irq < 0) + return dev_err_probe(dev, irq, + "Failed to parse MSI IRQ '%s'\n", + msi_name); + + pp->msi_irq[ctrl] = irq; + } + + /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */ + if (ctrl == 0) + return -ENXIO; + + max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL; + if (pp->num_vectors > max_vectors) { + dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n", + max_vectors); + pp->num_vectors = max_vectors; + } + if (!pp->num_vectors) + pp->num_vectors = max_vectors; + + return 0; +} + +static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct platform_device *pdev = to_platform_device(dev); + u64 *msi_vaddr; + int ret; + u32 ctrl, num_ctrls; + + for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) + pp->irq_mask[ctrl] = ~0; + + if (!pp->msi_irq[0]) { + ret = dw_pcie_parse_split_msi_irq(pp); + if (ret < 0 && ret != -ENXIO) + return ret; + } + + if (!pp->num_vectors) + pp->num_vectors = MSI_DEF_NUM_VECTORS; + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + if (!pp->msi_irq[0]) { + pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi"); + if (pp->msi_irq[0] < 0) { + pp->msi_irq[0] = platform_get_irq(pdev, 0); + if (pp->msi_irq[0] < 0) + return pp->msi_irq[0]; + } + } + + dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors); + + pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; + + ret = dw_pcie_allocate_domains(pp); + if (ret) + return ret; + + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + if (pp->msi_irq[ctrl] > 0) + irq_set_chained_handler_and_data(pp->msi_irq[ctrl], + dw_chained_msi_isr, pp); + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + dev_warn(dev, "Failed to set DMA mask to 32-bit. 
Devices with only 32-bit MSI support may not work properly\n"); + + msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data, + GFP_KERNEL); + if (!msi_vaddr) { + dev_err(dev, "Failed to alloc and map MSI data\n"); + dw_pcie_free_msi(pp); + return -ENOMEM; + } + + return 0; +} + +int dw_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; @@ -293,17 +390,17 @@ int dw_pcie_host_init(struct pcie_port *pp) struct platform_device *pdev = to_platform_device(dev); struct resource_entry *win; struct pci_host_bridge *bridge; - struct resource *cfg_res; + struct resource *res; int ret; - raw_spin_lock_init(&pci->pp.lock); + raw_spin_lock_init(&pp->lock); - cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (cfg_res) { - pp->cfg0_size = resource_size(cfg_res); - pp->cfg0_base = cfg_res->start; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (res) { + pp->cfg0_size = resource_size(res); + pp->cfg0_base = res->start; - pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res); + pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pp->va_cfg0_base)) return PTR_ERR(pp->va_cfg0_base); } else { @@ -312,8 +409,8 @@ int dw_pcie_host_init(struct pcie_port *pp) } if (!pci->dbi_base) { - struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); } @@ -350,60 +447,39 @@ int dw_pcie_host_init(struct pcie_port *pp) of_property_read_bool(np, "msi-parent") || of_property_read_bool(np, "msi-map")); - if (!pp->num_vectors) { + /* + * For the has_msi_ctrl case the default assignment is handled + * in the dw_pcie_msi_host_init(). + */ + if (!pp->has_msi_ctrl && !pp->num_vectors) { pp->num_vectors = MSI_DEF_NUM_VECTORS; } else if (pp->num_vectors > MAX_MSI_IRQS) { dev_err(dev, "Invalid number of vectors\n"); - return -EINVAL; + ret = -EINVAL; + goto err_deinit_host; } if (pp->ops->msi_host_init) { ret = pp->ops->msi_host_init(pp); if (ret < 0) - return ret; + goto err_deinit_host; } else if (pp->has_msi_ctrl) { - if (!pp->msi_irq) { - pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi"); - if (pp->msi_irq < 0) { - pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) - return pp->msi_irq; - } - } - - pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; - - ret = dw_pcie_allocate_domains(pp); - if (ret) - return ret; - - if (pp->msi_irq > 0) - irq_set_chained_handler_and_data(pp->msi_irq, - dw_chained_msi_isr, - pp); - - ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32)); - if (ret) - dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. 
Devices with only 32-bit MSI support may not work properly\n"); - - pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg, - sizeof(pp->msi_msg), - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - if (dma_mapping_error(pci->dev, pp->msi_data)) { - dev_err(pci->dev, "Failed to map MSI data\n"); - pp->msi_data = 0; - goto err_free_msi; - } + ret = dw_pcie_msi_host_init(pp); + if (ret < 0) + goto err_deinit_host; } } + dw_pcie_version_detect(pci); + dw_pcie_iatu_detect(pci); - dw_pcie_setup_rc(pp); + ret = dw_pcie_setup_rc(pp); + if (ret) + goto err_free_msi; - if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) { - ret = pci->ops->start_link(pci); + if (!dw_pcie_link_up(pci)) { + ret = dw_pcie_start_link(pci); if (ret) goto err_free_msi; } @@ -414,32 +490,50 @@ int dw_pcie_host_init(struct pcie_port *pp) bridge->sysdata = pp; ret = pci_host_probe(bridge); - if (!ret) - return 0; + if (ret) + goto err_stop_link; + + return 0; + +err_stop_link: + dw_pcie_stop_link(pci); err_free_msi: if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); + +err_deinit_host: + if (pp->ops->host_deinit) + pp->ops->host_deinit(pp); + return ret; } EXPORT_SYMBOL_GPL(dw_pcie_host_init); -void dw_pcie_host_deinit(struct pcie_port *pp) +void dw_pcie_host_deinit(struct dw_pcie_rp *pp) { + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + pci_stop_root_bus(pp->bridge->bus); pci_remove_root_bus(pp->bridge->bus); + + dw_pcie_stop_link(pci); + if (pp->has_msi_ctrl) dw_pcie_free_msi(pp); + + if (pp->ops->host_deinit) + pp->ops->host_deinit(pp); } EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - int type; - u32 busdev; - struct pcie_port *pp = bus->sysdata; + struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int type, ret; + u32 busdev; /* * Checking whether the link is up here is a last line of defense @@ -460,8 +554,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, else type = PCIE_ATU_TYPE_CFG1; - - dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size); + ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, + pp->cfg0_size); + if (ret) + return NULL; return pp->va_cfg0_base + where; } @@ -469,33 +565,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - int ret; - struct pcie_port *pp = bus->sysdata; + struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret; ret = pci_generic_config_read(bus, devfn, where, size, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; - if (!ret && pci->io_cfg_atu_shared) - dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); + if (pp->cfg0_io_shared) { + ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, + pp->io_base, pp->io_bus_addr, + pp->io_size); + if (ret) + return PCIBIOS_SET_FAILED; + } - return ret; + return PCIBIOS_SUCCESSFUL; } static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - int ret; - struct pcie_port *pp = bus->sysdata; + struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret; ret = pci_generic_config_write(bus, devfn, where, size, val); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; - if (!ret && pci->io_cfg_atu_shared) - dw_pcie_prog_outbound_atu(pci, 0, 
PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); + if (pp->cfg0_io_shared) { + ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, + pp->io_base, pp->io_bus_addr, + pp->io_size); + if (ret) + return PCIBIOS_SET_FAILED; + } - return ret; + return PCIBIOS_SUCCESSFUL; } static struct pci_ops dw_child_pcie_ops = { @@ -506,7 +614,7 @@ static struct pci_ops dw_child_pcie_ops = { void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { - struct pcie_port *pp = bus->sysdata; + struct dw_pcie_rp *pp = bus->sysdata; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); if (PCI_SLOT(devfn) > 0) @@ -522,11 +630,72 @@ static struct pci_ops dw_pcie_ops = { .write = pci_generic_config_write, }; -void dw_pcie_setup_rc(struct pcie_port *pp) +static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) { - int i; - u32 val, ctrl, num_ctrls; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct resource_entry *entry; + int i, ret; + + /* Note the very first outbound ATU is used for CFG IOs */ + if (!pci->num_ob_windows) { + dev_err(pci->dev, "No outbound iATU found\n"); + return -EINVAL; + } + + /* + * Ensure all outbound windows are disabled before proceeding with + * the MEM/IO ranges setups. + */ + for (i = 0; i < pci->num_ob_windows; i++) + dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i); + + i = 0; + resource_list_for_each_entry(entry, &pp->bridge->windows) { + if (resource_type(entry->res) != IORESOURCE_MEM) + continue; + + if (pci->num_ob_windows <= ++i) + break; + + ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM, + entry->res->start, + entry->res->start - entry->offset, + resource_size(entry->res)); + if (ret) { + dev_err(pci->dev, "Failed to set MEM range %pr\n", + entry->res); + return ret; + } + } + + if (pp->io_size) { + if (pci->num_ob_windows > ++i) { + ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO, + pp->io_base, + pp->io_bus_addr, + pp->io_size); + if (ret) { + dev_err(pci->dev, "Failed to set IO range %pr\n", + entry->res); + return ret; + } + } else { + pp->cfg0_io_shared = true; + } + } + + if (pci->num_ob_windows <= i) + dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n", + pci->num_ob_windows); + + return 0; +} + +int dw_pcie_setup_rc(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u32 val, ctrl, num_ctrls; + int ret; /* * Enable DBI read-only registers for writing/updating configuration. @@ -541,7 +710,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) /* Initialize IRQ Status array */ for (ctrl = 0; ctrl < num_ctrls; ctrl++) { - pp->irq_mask[ctrl] = ~0; dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), pp->irq_mask[ctrl]); @@ -576,45 +744,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp) PCI_COMMAND_MASTER | PCI_COMMAND_SERR; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); - /* Ensure all outbound windows are disabled so there are multiple matches */ - for (i = 0; i < pci->num_ob_windows; i++) - dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND); - /* * If the platform provides its own child bus config accesses, it means * the platform uses its own address translation component rather than * ATU, so we should not program the ATU here. 
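With only a limited pool of outbound iATU entries, the accessor rework above lets one window serve both config and I/O cycles: pp->cfg0_io_shared (replacing pci->io_cfg_atu_shared) records the sharing, and each config read/write must re-program the window back to the I/O range before returning. A condensed sketch of that pattern, mirroring the dw_pcie_rd_other_conf() hunk:

```c
/*
 * Condensed illustration of the shared CFG0/IO window handling; the
 * real accessor lives in pcie-designware-host.c, this is a sketch.
 */
static int dw_cfg_read_shared(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	/* The config access itself borrowed the sole remaining window */
	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/* Hand the window back to port I/O before any inb()/outb() runs */
	if (pp->cfg0_io_shared &&
	    dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
				      pp->io_bus_addr, pp->io_size))
		return PCIBIOS_SET_FAILED;

	return PCIBIOS_SUCCESSFUL;
}
```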
*/ if (pp->bridge->child_ops == &dw_child_pcie_ops) { - int atu_idx = 0; - struct resource_entry *entry; - - /* Get last memory resource entry */ - resource_list_for_each_entry(entry, &pp->bridge->windows) { - if (resource_type(entry->res) != IORESOURCE_MEM) - continue; - - if (pci->num_ob_windows <= ++atu_idx) - break; - - dw_pcie_prog_outbound_atu(pci, atu_idx, - PCIE_ATU_TYPE_MEM, entry->res->start, - entry->res->start - entry->offset, - resource_size(entry->res)); - } - - if (pp->io_size) { - if (pci->num_ob_windows > ++atu_idx) - dw_pcie_prog_outbound_atu(pci, atu_idx, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); - else - pci->io_cfg_atu_shared = true; - } - - if (pci->num_ob_windows <= atu_idx) - dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)", - pci->num_ob_windows); + ret = dw_pcie_iatu_setup(pp); + if (ret) + return ret; } dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); @@ -627,5 +765,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); dw_pcie_dbi_ro_wr_dis(pci); + + return 0; } EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 0c5de87d3cc6..1fcfb840f238 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -17,13 +17,11 @@ #include <linux/platform_device.h> #include <linux/resource.h> #include <linux/types.h> -#include <linux/regmap.h> #include "pcie-designware.h" struct dw_plat_pcie { struct dw_pcie *pci; - struct regmap *regmap; enum dw_pcie_device_mode mode; }; @@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data { enum dw_pcie_device_mode mode; }; -static const struct of_device_id dw_plat_pcie_of_match[]; - static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { }; -static int dw_plat_pcie_establish_link(struct dw_pcie *pci) -{ - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .start_link = dw_plat_pcie_establish_link, -}; - static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, struct platform_device *pdev) { struct dw_pcie *pci = dw_plat_pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; @@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; - pci->ops = &dw_pcie_ops; dw_plat_pcie->pci = pci; dw_plat_pcie->mode = mode; @@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) return -ENODEV; ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); - if (ret < 0) - return ret; break; case DW_PCIE_EP_TYPE: if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) return -ENODEV; pci->ep.ops = &pcie_ep_ops; - return dw_pcie_ep_init(&pci->ep); + ret = dw_pcie_ep_init(&pci->ep); + break; default: dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); + ret = -EINVAL; + break; } - return 0; + return ret; } static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index d92c8a25094f..c6725c519a47 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -8,14 +8,41 @@ * Author: Jingoo Han <jg1.han@samsung.com> */ +#include <linux/align.h> +#include 
<linux/bitops.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/sizes.h> #include <linux/types.h> #include "../../pci.h" #include "pcie-designware.h" +void dw_pcie_version_detect(struct dw_pcie *pci) +{ + u32 ver; + + /* The content of the CSR is zero on DWC PCIe older than v4.70a */ + ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER); + if (!ver) + return; + + if (pci->version && pci->version != ver) + dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n", + pci->version, ver); + else + pci->version = ver; + + ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE); + + if (pci->type && pci->type != ver) + dev_warn(pci->dev, "Types don't match (%08x != %08x)\n", + pci->type, ver); + else + pci->type = ver; +} + /* * These interfaces resemble the pci_find_*capability() interfaces, but these * are for configuring host controllers, which are bridges *to* PCI devices but @@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val) dev_err(pci->dev, "write DBI address failed\n"); } -static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) +static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir, + u32 index) { + if (pci->iatu_unroll_enabled) + return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); + + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index); + return pci->atu_base; +} + +static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg) +{ + void __iomem *base; int ret; u32 val; + base = dw_pcie_select_atu(pci, dir, index); + if (pci->ops && pci->ops->read_dbi) - return pci->ops->read_dbi(pci, pci->atu_base, reg, 4); + return pci->ops->read_dbi(pci, base, reg, 4); - ret = dw_pcie_read(pci->atu_base + reg, 4, &val); + ret = dw_pcie_read(base + reg, 4, &val); if (ret) dev_err(pci->dev, "Read ATU address failed\n"); return val; } -static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) +static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index, + u32 reg, u32 val) { + void __iomem *base; int ret; + base = dw_pcie_select_atu(pci, dir, index); + if (pci->ops && pci->ops->write_dbi) { - pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val); + pci->ops->write_dbi(pci, base, reg, 4, val); return; } - ret = dw_pcie_write(pci->atu_base + reg, 4, val); + ret = dw_pcie_write(base + reg, 4, val); if (ret) dev_err(pci->dev, "Write ATU address failed\n"); } -static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) +static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg) { - u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - - return dw_pcie_readl_atu(pci, offset + reg); + return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg); } -static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, - u32 val) +static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) { - u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - - dw_pcie_writel_atu(pci, offset + reg, val); + dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val); } static inline u32 dw_pcie_enable_ecrc(u32 val) @@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val) return val | PCIE_ATU_TD; } -static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no, - int index, int type, - u64 cpu_addr, u64 pci_addr, - u64 size) +static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, + int index, int type, u64 cpu_addr, + u64 pci_addr, u64 
size) { u32 retries, val; - u64 limit_addr = cpu_addr + size - 1; - - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, - lower_32_bits(cpu_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, - upper_32_bits(cpu_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT, - lower_32_bits(limit_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT, - upper_32_bits(limit_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, - lower_32_bits(pci_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, - upper_32_bits(pci_addr)); - val = type | PCIE_ATU_FUNC_NUM(func_no); - val = upper_32_bits(size - 1) ? - val | PCIE_ATU_INCREASE_REGION_SIZE : val; - if (pci->version == 0x490A) - val = dw_pcie_enable_ecrc(val); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, - PCIE_ATU_ENABLE); + u64 limit_addr; - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. - */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_ob_unroll(pci, index, - PCIE_ATU_UNR_REGION_CTRL2); - if (val & PCIE_ATU_ENABLE) - return; + if (pci->ops && pci->ops->cpu_addr_fixup) + cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); - mdelay(LINK_WAIT_IATU); + limit_addr = cpu_addr + size - 1; + + if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || + !IS_ALIGNED(cpu_addr, pci->region_align) || + !IS_ALIGNED(pci_addr, pci->region_align) || !size) { + return -EINVAL; } - dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} -static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, - int index, int type, u64 cpu_addr, - u64 pci_addr, u64 size) -{ - u32 retries, val; + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE, + lower_32_bits(cpu_addr)); + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE, + upper_32_bits(cpu_addr)); - if (pci->ops && pci->ops->cpu_addr_fixup) - cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT, + lower_32_bits(limit_addr)); + if (dw_pcie_ver_is_ge(pci, 460A)) + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT, + upper_32_bits(limit_addr)); - if (pci->iatu_unroll_enabled) { - dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type, - cpu_addr, pci_addr, size); - return; - } + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET, + lower_32_bits(pci_addr)); + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET, + upper_32_bits(pci_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, - PCIE_ATU_REGION_OUTBOUND | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, - lower_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, - upper_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, - lower_32_bits(cpu_addr + size - 1)); - if (pci->version >= 0x460A) - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT, - upper_32_bits(cpu_addr + size - 1)); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, - lower_32_bits(pci_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, - upper_32_bits(pci_addr)); val = type | PCIE_ATU_FUNC_NUM(func_no); - val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ? 
- val | PCIE_ATU_INCREASE_REGION_SIZE : val; - if (pci->version == 0x490A) + if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && + dw_pcie_ver_is_ge(pci, 460A)) + val |= PCIE_ATU_INCREASE_REGION_SIZE; + if (dw_pcie_ver_is(pci, 490A)) val = dw_pcie_enable_ecrc(val); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val); + + dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE); /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. */ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2); if (val & PCIE_ATU_ENABLE) - return; + return 0; mdelay(LINK_WAIT_IATU); } + dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, - u64 cpu_addr, u64 pci_addr, u64 size) -{ - __dw_pcie_prog_outbound_atu(pci, 0, index, type, - cpu_addr, pci_addr, size); + return -ETIMEDOUT; } -void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u64 pci_addr, - u64 size) +int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size) { - __dw_pcie_prog_outbound_atu(pci, func_no, index, type, - cpu_addr, pci_addr, size); + return __dw_pcie_prog_outbound_atu(pci, 0, index, type, + cpu_addr, pci_addr, size); } -static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) +int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u64 pci_addr, + u64 size) { - u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - - return dw_pcie_readl_atu(pci, offset + reg); + return __dw_pcie_prog_outbound_atu(pci, func_no, index, type, + cpu_addr, pci_addr, size); } -static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, - u32 val) +static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg) { - u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - - dw_pcie_writel_atu(pci, offset + reg, val); + return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg); } -static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no, - int index, int bar, u64 cpu_addr, - enum dw_pcie_as_type as_type) +static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg, + u32 val) { - int type; - u32 retries, val; - - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, - lower_32_bits(cpu_addr)); - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, - upper_32_bits(cpu_addr)); - - switch (as_type) { - case DW_PCIE_AS_MEM: - type = PCIE_ATU_TYPE_MEM; - break; - case DW_PCIE_AS_IO: - type = PCIE_ATU_TYPE_IO; - break; - default: - return -EINVAL; - } - - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type | - PCIE_ATU_FUNC_NUM(func_no)); - dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, - PCIE_ATU_FUNC_NUM_MATCH_EN | - PCIE_ATU_ENABLE | - PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); - - /* - * Make sure ATU enable takes effect before any subsequent config - * and I/O accesses. 
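The rewritten __dw_pcie_prog_outbound_atu() now fails fast with -EINVAL instead of letting the hardware silently truncate an impossible window. Since region_align and region_limit are probed at init time by dw_pcie_iatu_detect_regions() further down, the validation reduces to two checks, shown here in isolation:

```c
/* Sketch of the new up-front window validation (same checks as the
 * hunk above; IS_ALIGNED() comes from the newly included <linux/align.h>). */
static int atu_region_valid(struct dw_pcie *pci, u64 cpu_addr,
			    u64 pci_addr, u64 size)
{
	u64 limit_addr = cpu_addr + size - 1;

	/* The window must not spill past the highest address the HW can map */
	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit))
		return -EINVAL;

	/* Both sides of the translation must honour the region granularity */
	if (!IS_ALIGNED(cpu_addr, pci->region_align) ||
	    !IS_ALIGNED(pci_addr, pci->region_align) || !size)
		return -EINVAL;

	return 0;
}
```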
- */ - for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_ib_unroll(pci, index, - PCIE_ATU_UNR_REGION_CTRL2); - if (val & PCIE_ATU_ENABLE) - return 0; - - mdelay(LINK_WAIT_IATU); - } - dev_err(pci->dev, "Inbound iATU is not being enabled\n"); - - return -EBUSY; + dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val); } int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int bar, u64 cpu_addr, - enum dw_pcie_as_type as_type) + int type, u64 cpu_addr, u8 bar) { - int type; u32 retries, val; - if (pci->iatu_unroll_enabled) - return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar, - cpu_addr, as_type); - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | - index); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); - dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); - - switch (as_type) { - case DW_PCIE_AS_MEM: - type = PCIE_ATU_TYPE_MEM; - break; - case DW_PCIE_AS_IO: - type = PCIE_ATU_TYPE_IO; - break; - default: + if (!IS_ALIGNED(cpu_addr, pci->region_align)) return -EINVAL; - } - dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | - PCIE_ATU_FUNC_NUM(func_no)); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | - PCIE_ATU_FUNC_NUM_MATCH_EN | - PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, + lower_32_bits(cpu_addr)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type | + PCIE_ATU_FUNC_NUM(func_no)); + dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, + PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); /* * Make sure ATU enable takes effect before any subsequent config * and I/O accesses. 
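With the unroll/viewport split hidden behind dw_pcie_select_atu(), the inbound and outbound paths now end in an identical bounded poll on the region's CTRL2 enable bit, and both report -ETIMEDOUT (the inbound side used to say -EBUSY). A hypothetical helper, not part of this patch, showing the tail they could share:

```c
/* Hypothetical factoring of the common enable-poll; dw_pcie_readl_atu()
 * is the new direction-aware accessor defined earlier in this file. */
static int atu_wait_enabled(struct dw_pcie *pci, u32 dir, u32 index)
{
	u32 retries, val;

	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	return -ETIMEDOUT;
}
```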
*/ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { - val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); + val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2); if (val & PCIE_ATU_ENABLE) return 0; mdelay(LINK_WAIT_IATU); } + dev_err(pci->dev, "Inbound iATU is not being enabled\n"); - return -EBUSY; + return -ETIMEDOUT; } -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, - enum dw_pcie_region_type type) +void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index) { - int region; - - switch (type) { - case DW_PCIE_REGION_INBOUND: - region = PCIE_ATU_REGION_INBOUND; - break; - case DW_PCIE_REGION_OUTBOUND: - region = PCIE_ATU_REGION_OUTBOUND; - break; - default: - return; - } - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE); + dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0); } int dw_pcie_wait_for_link(struct dw_pcie *pci) { + u32 offset, val; int retries; /* Check if the link is up or not */ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { - if (dw_pcie_link_up(pci)) { - dev_info(pci->dev, "Link up\n"); - return 0; - } + if (dw_pcie_link_up(pci)) + break; + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); } - dev_info(pci->dev, "Phy link never came up\n"); + if (retries >= LINK_WAIT_MAX_RETRIES) { + dev_err(pci->dev, "Phy link never came up\n"); + return -ETIMEDOUT; + } - return -ETIMEDOUT; + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); + + dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", + FIELD_GET(PCI_EXP_LNKSTA_CLS, val), + FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); + + return 0; } EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); @@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci) if (pci->ops && pci->ops->link_up) return pci->ops->link_up(pci); - val = readl(pci->dbi_base + PCIE_PORT_DEBUG1); + val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1); return ((val & PCIE_PORT_DEBUG1_LINK_UP) && (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); } @@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) } -static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) +static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) { u32 val; val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); if (val == 0xffffffff) - return 1; + return true; - return 0; + return false; } -static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci) +static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) { - int max_region, i, ob = 0, ib = 0; - u32 val; + int max_region, ob, ib; + u32 val, min, dir; + u64 max; - max_region = min((int)pci->atu_size / 512, 256); - - for (i = 0; i < max_region; i++) { - dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET, - 0x11110000); + if (pci->iatu_unroll_enabled) { + max_region = min((int)pci->atu_size / 512, 256); + } else { + dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); + max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; + } - val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET); - if (val == 0x11110000) - ob++; - else + for (ob = 0; ob < max_region; ob++) { + dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000); + val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET); + if (val != 0x11110000) break; } - for (i = 0; i < max_region; i++) { - dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET, - 0x11110000); - - val = dw_pcie_readl_ib_unroll(pci, i, 
PCIE_ATU_UNR_LOWER_TARGET); - if (val == 0x11110000) - ib++; - else + for (ib = 0; ib < max_region; ib++) { + dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000); + val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET); + if (val != 0x11110000) break; } - pci->num_ib_windows = ib; - pci->num_ob_windows = ob; -} - -static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) -{ - int max_region, i, ob = 0, ib = 0; - u32 val; - - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); - max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; - for (i = 0; i < max_region; i++) { - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000); - val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET); - if (val == 0x11110000) - ob++; - else - break; + if (ob) { + dir = PCIE_ATU_REGION_DIR_OB; + } else if (ib) { + dir = PCIE_ATU_REGION_DIR_IB; + } else { + dev_err(pci->dev, "No iATU regions found\n"); + return; } - for (i = 0; i < max_region; i++) { - dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i); - dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000); - val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET); - if (val == 0x11110000) - ib++; - else - break; + dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0); + min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT); + + if (dw_pcie_ver_is_ge(pci, 460A)) { + dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF); + max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT); + } else { + max = 0; } - pci->num_ib_windows = ib; pci->num_ob_windows = ob; + pci->num_ib_windows = ib; + pci->region_align = 1 << fls(min); + pci->region_limit = (max << 32) | (SZ_4G - 1); } void dw_pcie_iatu_detect(struct dw_pcie *pci) { - struct device *dev = pci->dev; - struct platform_device *pdev = to_platform_device(dev); + struct platform_device *pdev = to_platform_device(pci->dev); - if (pci->version >= 0x480A || (!pci->version && - dw_pcie_iatu_unroll_enabled(pci))) { - pci->iatu_unroll_enabled = true; + pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); + if (pci->iatu_unroll_enabled) { if (!pci->atu_base) { struct resource *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); if (res) { pci->atu_size = resource_size(res); - pci->atu_base = devm_ioremap_resource(dev, res); + pci->atu_base = devm_ioremap_resource(pci->dev, res); } if (!pci->atu_base || IS_ERR(pci->atu_base)) pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; @@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci) if (!pci->atu_size) /* Pick a minimal default, enough for 8 in and 8 out windows */ pci->atu_size = SZ_4K; + } else { + pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; + pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; + } - dw_pcie_iatu_detect_regions_unroll(pci); - } else - dw_pcie_iatu_detect_regions(pci); + dw_pcie_iatu_detect_regions(pci); dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? 
"enabled" : "disabled"); - dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound", - pci->num_ob_windows, pci->num_ib_windows); + dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n", + pci->num_ob_windows, pci->num_ib_windows, + pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G); } void dw_pcie_setup(struct dw_pcie *pci) { + struct device_node *np = pci->dev->of_node; u32 val; - struct device *dev = pci->dev; - struct device_node *np = dev->of_node; if (pci->link_gen > 0) dw_pcie_link_set_max_speed(pci, pci->link_gen); @@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci) val |= PORT_LINK_DLL_LINK_EN; dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); + if (of_property_read_bool(np, "snps,enable-cdm-check")) { + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | + PCIE_PL_CHK_REG_CHK_REG_START; + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + } + of_property_read_u32(np, "num-lanes", &pci->num_lanes); if (!pci->num_lanes) { dev_dbg(pci->dev, "Using h/w default number of lanes\n"); @@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci) break; } dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); - - if (of_property_read_bool(np, "snps,enable-cdm-check")) { - val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); - val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | - PCIE_PL_CHK_REG_CHK_REG_START; - dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); - } } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 7d6e9b7576be..a871ae7eb59e 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -20,6 +20,29 @@ #include <linux/pci-epc.h> #include <linux/pci-epf.h> +/* DWC PCIe IP-core versions (native support since v4.70a) */ +#define DW_PCIE_VER_365A 0x3336352a +#define DW_PCIE_VER_460A 0x3436302a +#define DW_PCIE_VER_470A 0x3437302a +#define DW_PCIE_VER_480A 0x3438302a +#define DW_PCIE_VER_490A 0x3439302a +#define DW_PCIE_VER_520A 0x3532302a + +#define __dw_pcie_ver_cmp(_pci, _ver, _op) \ + ((_pci)->version _op DW_PCIE_VER_ ## _ver) + +#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==) + +#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=) + +#define dw_pcie_ver_type_is(_pci, _ver, _type) \ + (__dw_pcie_ver_cmp(_pci, _ver, ==) && \ + __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==)) + +#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \ + (__dw_pcie_ver_cmp(_pci, _ver, ==) && \ + __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=)) + /* Parameters for the waiting for link up routine */ #define LINK_WAIT_MAX_RETRIES 10 #define LINK_WAIT_USLEEP_MIN 90000 @@ -74,13 +97,34 @@ #define PCIE_MSI_INTR0_MASK 0x82C #define PCIE_MSI_INTR0_STATUS 0x830 +#define GEN3_RELATED_OFF 0x890 +#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0) +#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13) +#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24) + #define PCIE_PORT_MULTI_LANE_CTRL 0x8C0 #define PORT_MLTI_UPCFG_SUPPORT BIT(7) +#define PCIE_VERSION_NUMBER 0x8F8 +#define PCIE_VERSION_TYPE 0x8FC + +/* + * iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each + * iATU region CSRs had been indirectly accessible by means of the dedicated + * viewport selector. 
The iATU/eDMA CSRs space was re-designed in DWC PCIe + * v4.80a in a way so the viewport was unrolled into the directly accessible + * iATU/eDMA CSRs space. + */ #define PCIE_ATU_VIEWPORT 0x900 -#define PCIE_ATU_REGION_INBOUND BIT(31) -#define PCIE_ATU_REGION_OUTBOUND 0 -#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_REGION_DIR_IB BIT(31) +#define PCIE_ATU_REGION_DIR_OB 0 +#define PCIE_ATU_VIEWPORT_BASE 0x904 +#define PCIE_ATU_UNROLL_BASE(dir, index) \ + (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0)) +#define PCIE_ATU_VIEWPORT_SIZE 0x2C +#define PCIE_ATU_REGION_CTRL1 0x000 #define PCIE_ATU_INCREASE_REGION_SIZE BIT(13) #define PCIE_ATU_TYPE_MEM 0x0 #define PCIE_ATU_TYPE_IO 0x2 @@ -88,19 +132,19 @@ #define PCIE_ATU_TYPE_CFG1 0x5 #define PCIE_ATU_TD BIT(8) #define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) -#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_REGION_CTRL2 0x004 #define PCIE_ATU_ENABLE BIT(31) #define PCIE_ATU_BAR_MODE_ENABLE BIT(30) #define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19) -#define PCIE_ATU_LOWER_BASE 0x90C -#define PCIE_ATU_UPPER_BASE 0x910 -#define PCIE_ATU_LIMIT 0x914 -#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_LOWER_BASE 0x008 +#define PCIE_ATU_UPPER_BASE 0x00C +#define PCIE_ATU_LIMIT 0x010 +#define PCIE_ATU_LOWER_TARGET 0x014 #define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x) #define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x) #define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x) -#define PCIE_ATU_UPPER_TARGET 0x91C -#define PCIE_ATU_UPPER_LIMIT 0x924 +#define PCIE_ATU_UPPER_TARGET 0x018 +#define PCIE_ATU_UPPER_LIMIT 0x020 #define PCIE_MISC_CONTROL_1_OFF 0x8BC #define PCIE_DBI_RO_WR_EN BIT(0) @@ -131,6 +175,25 @@ #define PCIE_ATU_UNR_UPPER_LIMIT 0x20 /* + * RAS-DES register definitions + */ +#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8 +#define EVENT_COUNTER_ALL_CLEAR 0x3 +#define EVENT_COUNTER_ENABLE_ALL 0x7 +#define EVENT_COUNTER_ENABLE_SHIFT 2 +#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0) +#define EVENT_COUNTER_EVENT_SEL_SHIFT 16 +#define EVENT_COUNTER_EVENT_Tx_L0S 0x2 +#define EVENT_COUNTER_EVENT_Rx_L0S 0x3 +#define EVENT_COUNTER_EVENT_L1 0x5 +#define EVENT_COUNTER_EVENT_L1_1 0x7 +#define EVENT_COUNTER_EVENT_L1_2 0x8 +#define EVENT_COUNTER_GROUP_SEL_SHIFT 24 +#define EVENT_COUNTER_GROUP_5 0x5 + +#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc + +/* * The default address offset between dbi_base and atu_base. 
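The re-mapped offsets above describe the unrolled CSR space: every iATU region owns a 512-byte slot, with outbound registers at the slot base and inbound ones at +0x100, which is what PCIE_ATU_UNROLL_BASE() computes. A tiny standalone demonstration of that arithmetic (plain user-space C, for illustration only):

```c
#include <stdio.h>

/* Mirrors PCIE_ATU_UNROLL_BASE(): index selects the 512-byte slot,
 * bit 8 selects the inbound half of the slot. */
static unsigned int unroll_base(int inbound, unsigned int index)
{
	return (index << 9) | (inbound ? 0x100 : 0);
}

int main(void)
{
	printf("OB region 0: 0x%03x\n", unroll_base(0, 0)); /* 0x000 */
	printf("IB region 0: 0x%03x\n", unroll_base(1, 0)); /* 0x100 */
	printf("OB region 1: 0x%03x\n", unroll_base(0, 1)); /* 0x200 */
	printf("IB region 3: 0x%03x\n", unroll_base(1, 3)); /* 0x700 */
	return 0;
}
```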
Root controller * drivers are not required to initialize atu_base if the offset matches this * default; the driver core automatically derives atu_base from dbi_base using @@ -138,13 +201,6 @@ */ #define DEFAULT_DBI_ATU_OFFSET (0x3 << 20) -/* Register address builder */ -#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ - ((region) << 9) - -#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ - (((region) << 9) | BIT(8)) - #define MAX_MSI_IRQS 256 #define MAX_MSI_IRQS_PER_CTRL 32 #define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) @@ -155,16 +211,10 @@ #define MAX_IATU_IN 256 #define MAX_IATU_OUT 256 -struct pcie_port; struct dw_pcie; +struct dw_pcie_rp; struct dw_pcie_ep; -enum dw_pcie_region_type { - DW_PCIE_REGION_UNKNOWN, - DW_PCIE_REGION_INBOUND, - DW_PCIE_REGION_OUTBOUND, -}; - enum dw_pcie_device_mode { DW_PCIE_UNKNOWN_TYPE, DW_PCIE_EP_TYPE, @@ -173,12 +223,14 @@ enum dw_pcie_device_mode { }; struct dw_pcie_host_ops { - int (*host_init)(struct pcie_port *pp); - int (*msi_host_init)(struct pcie_port *pp); + int (*host_init)(struct dw_pcie_rp *pp); + void (*host_deinit)(struct dw_pcie_rp *pp); + int (*msi_host_init)(struct dw_pcie_rp *pp); }; -struct pcie_port { +struct dw_pcie_rp { bool has_msi_ctrl:1; + bool cfg0_io_shared:1; u64 cfg0_base; void __iomem *va_cfg0_base; u32 cfg0_size; @@ -187,10 +239,9 @@ struct pcie_port { u32 io_size; int irq; const struct dw_pcie_host_ops *ops; - int msi_irq; + int msi_irq[MAX_MSI_CTRLS]; struct irq_domain *irq_domain; struct irq_domain *msi_domain; - u16 msi_msg; dma_addr_t msi_data; struct irq_chip *msi_irq_chip; u32 num_vectors; @@ -200,12 +251,6 @@ struct pcie_port { DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); }; -enum dw_pcie_as_type { - DW_PCIE_AS_UNKNOWN, - DW_PCIE_AS_MEM, - DW_PCIE_AS_IO, -}; - struct dw_pcie_ep_ops { void (*ep_init)(struct dw_pcie_ep *ep); int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, @@ -261,20 +306,21 @@ struct dw_pcie { struct device *dev; void __iomem *dbi_base; void __iomem *dbi_base2; - /* Used when iatu_unroll_enabled is true */ void __iomem *atu_base; size_t atu_size; u32 num_ib_windows; u32 num_ob_windows; - struct pcie_port pp; + u32 region_align; + u64 region_limit; + struct dw_pcie_rp pp; struct dw_pcie_ep ep; const struct dw_pcie_ops *ops; - unsigned int version; + u32 version; + u32 type; int num_lanes; int link_gen; u8 n_fts[2]; bool iatu_unroll_enabled: 1; - bool io_cfg_atu_shared: 1; }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -282,6 +328,8 @@ struct dw_pcie { #define to_dw_pcie_from_ep(endpoint) \ container_of((endpoint), struct dw_pcie, ep) +void dw_pcie_version_detect(struct dw_pcie *pci); + u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); @@ -294,17 +342,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val); int dw_pcie_link_up(struct dw_pcie *pci); void dw_pcie_upconfig_setup(struct dw_pcie *pci); int dw_pcie_wait_for_link(struct dw_pcie *pci); -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, - int type, u64 cpu_addr, u64 pci_addr, - u64 size); -void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int type, u64 cpu_addr, u64 pci_addr, - u64 size); +int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, + u64 cpu_addr, u64 pci_addr, u64 size); +int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, + int type, u64 cpu_addr, u64 pci_addr, u64 size); int 
dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, - int bar, u64 cpu_addr, - enum dw_pcie_as_type as_type); -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, - enum dw_pcie_region_type type); + int type, u64 cpu_addr, u8 bar); +void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index); void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); @@ -365,34 +409,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) dw_pcie_writel_dbi(pci, reg, val); } +static inline int dw_pcie_start_link(struct dw_pcie *pci) +{ + if (pci->ops && pci->ops->start_link) + return pci->ops->start_link(pci); + + return 0; +} + +static inline void dw_pcie_stop_link(struct dw_pcie *pci) +{ + if (pci->ops && pci->ops->stop_link) + pci->ops->stop_link(pci); +} + #ifdef CONFIG_PCIE_DW_HOST -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); -void dw_pcie_setup_rc(struct pcie_port *pp); -int dw_pcie_host_init(struct pcie_port *pp); -void dw_pcie_host_deinit(struct pcie_port *pp); -int dw_pcie_allocate_domains(struct pcie_port *pp); +irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); +int dw_pcie_setup_rc(struct dw_pcie_rp *pp); +int dw_pcie_host_init(struct dw_pcie_rp *pp); +void dw_pcie_host_deinit(struct dw_pcie_rp *pp); +int dw_pcie_allocate_domains(struct dw_pcie_rp *pp); void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where); #else -static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) { return IRQ_NONE; } -static inline void dw_pcie_setup_rc(struct pcie_port *pp) +static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { + return 0; } -static inline int dw_pcie_host_init(struct pcie_port *pp) +static inline int dw_pcie_host_init(struct dw_pcie_rp *pp) { return 0; } -static inline void dw_pcie_host_deinit(struct pcie_port *pp) +static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp) { } -static inline int dw_pcie_allocate_domains(struct pcie_port *pp) +static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { return 0; } diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index c9b341e55cbb..c1e7653e508e 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -10,9 +10,12 @@ #include <linux/clk.h> #include <linux/gpio/consumer.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -26,6 +29,7 @@ */ #define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) #define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) +#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val) #define to_rockchip_pcie(x) dev_get_drvdata((x)->dev) @@ -36,10 +40,12 @@ #define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP) #define PCIE_L0S_ENTRY 0x11 #define PCIE_CLIENT_GENERAL_CONTROL 0x0 +#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8 +#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c #define PCIE_CLIENT_GENERAL_DEBUG 0x104 -#define PCIE_CLIENT_HOT_RESET_CTRL 0x180 +#define PCIE_CLIENT_HOT_RESET_CTRL 0x180 #define PCIE_CLIENT_LTSSM_STATUS 0x300 -#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) +#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) #define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0) struct rockchip_pcie { @@ -51,6 +57,7 @@ 
struct rockchip_pcie { struct reset_control *rst; struct gpio_desc *rst_gpio; struct regulator *vpcie3v3; + struct irq_domain *irq_domain; }; static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, @@ -65,6 +72,78 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, writel_relaxed(val, rockchip->apb_base + reg); } +static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); + unsigned long reg, hwirq; + + chained_irq_enter(chip, desc); + + reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY); + + for_each_set_bit(hwirq, &reg, 4) + generic_handle_domain_irq(rockchip->irq_domain, hwirq); + + chained_irq_exit(chip, desc); +} + +static void rockchip_intx_mask(struct irq_data *data) +{ + rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data), + HIWORD_UPDATE_BIT(BIT(data->hwirq)), + PCIE_CLIENT_INTR_MASK_LEGACY); +}; + +static void rockchip_intx_unmask(struct irq_data *data) +{ + rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data), + HIWORD_DISABLE_BIT(BIT(data->hwirq)), + PCIE_CLIENT_INTR_MASK_LEGACY); +}; + +static struct irq_chip rockchip_intx_irq_chip = { + .name = "INTx", + .irq_mask = rockchip_intx_mask, + .irq_unmask = rockchip_intx_unmask, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, +}; + +static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = rockchip_pcie_intx_map, +}; + +static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) +{ + struct device *dev = rockchip->pci.dev; + struct device_node *intc; + + intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller"); + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + + rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, + &intx_domain_ops, rockchip); + of_node_put(intc); + if (!rockchip->irq_domain) { + dev_err(dev, "failed to get a INTx IRQ domain\n"); + return -EINVAL; + } + + return 0; +} + static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip) { rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM, @@ -107,11 +186,24 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci) return 0; } -static int rockchip_pcie_host_init(struct pcie_port *pp) +static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); + struct device *dev = rockchip->pci.dev; u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); + int irq, ret; + + irq = of_irq_get_byname(dev->of_node, "legacy"); + if (irq < 0) + return irq; + + ret = rockchip_pcie_init_irq_domain(rockchip); + if (ret < 0) + dev_err(dev, "failed to init irq domain\n"); + + irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, + rockchip); /* LTSSM enable control mode */ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); @@ -152,6 +244,11 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev, if (IS_ERR(rockchip->rst_gpio)) return PTR_ERR(rockchip->rst_gpio); + rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(rockchip->rst)) + return
dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst), + "failed to get reset lines\n"); + return 0; } @@ -182,18 +279,6 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip) phy_power_off(rockchip->phy); } -static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip) -{ - struct device *dev = rockchip->pci.dev; - - rockchip->rst = devm_reset_control_array_get_exclusive(dev); - if (IS_ERR(rockchip->rst)) - return dev_err_probe(dev, PTR_ERR(rockchip->rst), - "failed to get reset lines\n"); - - return reset_control_deassert(rockchip->rst); -} - static const struct dw_pcie_ops dw_pcie_ops = { .link_up = rockchip_pcie_link_up, .start_link = rockchip_pcie_start_link, @@ -203,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie *rockchip; - struct pcie_port *pp; + struct dw_pcie_rp *pp; int ret; rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); @@ -222,6 +307,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (ret) return ret; + ret = reset_control_assert(rockchip->rst); + if (ret) + return ret; + /* DON'T MOVE ME: must be enable before PHY init */ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); if (IS_ERR(rockchip->vpcie3v3)) { @@ -241,7 +330,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (ret) goto disable_regulator; - ret = rockchip_pcie_reset_control_release(rockchip); + ret = reset_control_deassert(rockchip->rst); if (ret) goto deinit_phy; diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c index 00cde9a248b5..0c90583c078b 100644 --- a/drivers/pci/controller/dwc/pcie-fu740.c +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -16,11 +16,9 @@ #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> -#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/regulator/consumer.h> #include <linux/resource.h> #include <linux/types.h> #include <linux/interrupt.h> @@ -181,13 +179,62 @@ static int fu740_pcie_start_link(struct dw_pcie *pci) { struct device *dev = pci->dev; struct fu740_pcie *afp = dev_get_drvdata(dev); + u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + int ret; + u32 orig, tmp; + + /* + * Force 2.5GT/s when starting the link, due to some devices not + * probing at higher speeds. This happens with the PCIe switch + * on the Unmatched board when U-Boot has not initialised the PCIe. + * The fix in U-Boot is to force 2.5GT/s, which then gets cleared + * by the soft reset done by this driver. 
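The code below implements the workaround described in this comment: cap the Supported Link Speeds field of PCI_EXP_LNKCAP, bring the link up at 2.5GT/s, then restore the original value and request a directed speed change through PCIE_LINK_WIDTH_SPEED_CONTROL. A self-contained sketch of just the field manipulation (register constants as in include/uapi/linux/pci_regs.h, sample LNKCAP value invented; the driver itself goes through dw_pcie_readl_dbi()/dw_pcie_writel_dbi() with read-only DBI writes unlocked):

#include <stdint.h>
#include <stdio.h>

/* Values as in include/uapi/linux/pci_regs.h */
#define PCI_EXP_LNKCAP_SLS	0x0000000f	/* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001	/* 2.5GT/s */

int main(void)
{
	uint32_t lnkcap = 0x0047dc12;	/* invented: a Gen2-capable port */
	uint32_t orig = lnkcap & PCI_EXP_LNKCAP_SLS;

	/* Cap the advertised speed at 2.5GT/s before enabling the LTSSM. */
	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | PCI_EXP_LNKCAP_SLS_2_5GB;
	printf("capped LNKCAP:   0x%08x\n", lnkcap);

	/* Once the link is up, put the original field back and retrain
	 * (the driver sets PORT_LOGIC_SPEED_CHANGE for that). */
	lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | orig;
	printf("restored LNKCAP: 0x%08x\n", lnkcap);
	return 0;
}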
+ */ + dev_dbg(dev, "cap_exp at %x\n", cap_exp); + dw_pcie_dbi_ro_wr_en(pci); + + tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); + orig = tmp & PCI_EXP_LNKCAP_SLS; + tmp &= ~PCI_EXP_LNKCAP_SLS; + tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); /* Enable LTSSM */ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE); - return 0; + + ret = dw_pcie_wait_for_link(pci); + if (ret) { + dev_err(dev, "error: link did not start\n"); + goto err; + } + + tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP); + if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) { + dev_dbg(dev, "changing speed back to original\n"); + + tmp &= ~PCI_EXP_LNKCAP_SLS; + tmp |= orig; + dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp); + + tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); + tmp |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); + + ret = dw_pcie_wait_for_link(pci); + if (ret) { + dev_err(dev, "error: link did not start at new speed\n"); + goto err; + } + } + + ret = 0; +err: + WARN_ON(ret); /* we assume that errors will be very rare */ + dw_pcie_dbi_ro_wr_dis(pci); + return ret; } -static int fu740_pcie_host_init(struct pcie_port *pp) +static int fu740_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct fu740_pcie *afp = to_fu740_pcie(pci); @@ -224,7 +271,7 @@ static int fu740_pcie_host_init(struct pcie_port *pp) /* Clear hold_phy_rst */ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST); /* Enable pcieauxclk */ - ret = clk_prepare_enable(afp->pcie_aux); + clk_prepare_enable(afp->pcie_aux); /* Set RC mode */ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE); @@ -259,11 +306,11 @@ static int fu740_pcie_probe(struct platform_device *pdev) return PTR_ERR(afp->mgmt_base); /* Fetch GPIOs */ - afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW); + afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(afp->reset)) return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n"); - afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW); + afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW); if (IS_ERR(afp->pwren)) return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n"); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 410555dccb6d..e2b80f10030d 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) writel(val, histb_pcie->ctrl + reg); } -static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) +static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); } -static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) +static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci) return 0; } -static int histb_pcie_host_init(struct pcie_port *pp) +static int histb_pcie_host_init(struct dw_pcie_rp *pp) { struct 
dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie) regulator_disable(hipcie->vpcie); } -static int histb_pcie_host_enable(struct pcie_port *pp) +static int histb_pcie_host_enable(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev) { struct histb_pcie *hipcie; struct dw_pcie *pci; - struct pcie_port *pp; + struct dw_pcie_rp *pp; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; enum of_gpio_flags of_flags; diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 5ba144924ff8..333c33d98a70 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -58,10 +58,6 @@ #define BUS_IATU_OFFSET SZ_256M #define RESET_INTERVAL_MS 100 -struct intel_pcie_soc { - unsigned int pcie_ver; -}; - struct intel_pcie { struct dw_pcie pci; void __iomem *app_base; @@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie) intel_pcie_ltssm_disable(pcie); intel_pcie_link_setup(pcie); intel_pcie_init_n_fts(pci); - dw_pcie_setup_rc(&pci->pp); + + ret = dw_pcie_setup_rc(&pci->pp); + if (ret) + goto app_init_err; + dw_pcie_upconfig_setup(pci); intel_pcie_device_rst_deassert(pcie); @@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie) static int intel_pcie_remove(struct platform_device *pdev) { struct intel_pcie *pcie = platform_get_drvdata(pdev); - struct pcie_port *pp = &pcie->pci.pp; + struct dw_pcie_rp *pp = &pcie->pci.pp; dw_pcie_host_deinit(pp); __intel_pcie_remove(pcie); @@ -351,7 +351,7 @@ static int intel_pcie_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev) +static int intel_pcie_suspend_noirq(struct device *dev) { struct intel_pcie *pcie = dev_get_drvdata(dev); int ret; @@ -366,14 +366,14 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev) return ret; } -static int __maybe_unused intel_pcie_resume_noirq(struct device *dev) +static int intel_pcie_resume_noirq(struct device *dev) { struct intel_pcie *pcie = dev_get_drvdata(dev); return intel_pcie_host_setup(pcie); } -static int intel_pcie_rc_init(struct pcie_port *pp) +static int intel_pcie_rc_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct intel_pcie *pcie = dev_get_drvdata(pci->dev); @@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = { .host_init = intel_pcie_rc_init, }; -static const struct intel_pcie_soc pcie_data = { - .pcie_ver = 0x520A, -}; - static int intel_pcie_probe(struct platform_device *pdev) { - const struct intel_pcie_soc *data; struct device *dev = &pdev->dev; struct intel_pcie *pcie; - struct pcie_port *pp; + struct dw_pcie_rp *pp; struct dw_pcie *pci; int ret; @@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev) if (ret) return ret; - data = device_get_match_data(dev); - if (!data) - return -ENODEV; - pci->ops = &intel_pcie_ops; - pci->version = data->pcie_ver; pp->ops = &intel_pcie_dw_ops; ret = dw_pcie_host_init(pp); @@ -442,12 +432,12 @@ static int intel_pcie_probe(struct platform_device *pdev) } static const struct dev_pm_ops intel_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq, - intel_pcie_resume_noirq) + 
NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq, + intel_pcie_resume_noirq) }; static const struct of_device_id of_intel_pcie_match[] = { - { .compatible = "intel,lgm-pcie", .data = &pcie_data }, + { .compatible = "intel,lgm-pcie" }, {} }; diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index 1ac29a6eef22..f90f36bac018 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc) struct keembay_pcie *pcie = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 val, mask, status; - struct pcie_port *pp; + struct dw_pcie_rp *pp; /* * Keem Bay PCIe Controller provides an additional IP logic on top of @@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; u32 val; int ret; pp->ops = &keembay_pcie_host_ops; - pp->msi_irq = -ENODEV; + pp->msi_irq[0] = -ENODEV; ret = keembay_pcie_setup_msi_irq(pcie); if (ret) diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index fa6886d66488..d09507f822a7 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/of_address.h> @@ -332,9 +333,6 @@ static int hi3660_pcie_phy_init(struct platform_device *pdev, pcie->phy_priv = phy; phy->dev = dev; - /* registers */ - pdev = container_of(dev, struct platform_device, dev); - ret = hi3660_pcie_phy_get_clk(phy); if (ret) return ret; @@ -369,12 +367,11 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie, struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; char name[32]; int ret, i; /* This is an optional property */ - ret = of_gpio_named_count(np, "hisilicon,clken-gpios"); + ret = gpiod_count(dev, "hisilicon,clken"); if (ret < 0) return 0; @@ -623,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci) return 0; } -static int kirin_pcie_host_init(struct pcie_port *pp) +static int kirin_pcie_host_init(struct dw_pcie_rp *pp) { pp->bridge->ops = &kirin_pci_ops; @@ -756,22 +753,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev) return 0; } +struct kirin_pcie_data { + enum pcie_kirin_phy_type phy_type; +}; + +static const struct kirin_pcie_data kirin_960_data = { + .phy_type = PCIE_KIRIN_INTERNAL_PHY, +}; + +static const struct kirin_pcie_data kirin_970_data = { + .phy_type = PCIE_KIRIN_EXTERNAL_PHY, +}; + static const struct of_device_id kirin_pcie_match[] = { - { - .compatible = "hisilicon,kirin960-pcie", - .data = (void *)PCIE_KIRIN_INTERNAL_PHY - }, - { - .compatible = "hisilicon,kirin970-pcie", - .data = (void *)PCIE_KIRIN_EXTERNAL_PHY - }, + { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data }, + { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data }, {}, }; static int kirin_pcie_probe(struct platform_device *pdev) { - enum pcie_kirin_phy_type phy_type; struct device *dev = &pdev->dev; + const struct kirin_pcie_data *data; struct kirin_pcie *kirin_pcie; struct dw_pcie *pci; int ret; @@ -781,13 +784,12 @@ 
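The kirin hunks above replace an enum value cast through the of_device_id .data pointer with a pointer to a per-SoC struct kirin_pcie_data. That keeps the NULL check in probe meaningful (an enum that happens to be zero would be indistinguishable from a missing entry when cast to a pointer) and leaves room for further per-SoC fields. A compact user-space model of the pattern (struct of_id and get_match_data() are illustrative stand-ins for the OF match machinery):

#include <stdio.h>
#include <string.h>

enum phy_type { PHY_INTERNAL, PHY_EXTERNAL };	/* note: first value is 0 */

struct pcie_data {
	enum phy_type phy_type;
};

struct of_id {
	const char *compatible;
	const void *data;
};

static const struct pcie_data data_960 = { .phy_type = PHY_INTERNAL };
static const struct pcie_data data_970 = { .phy_type = PHY_EXTERNAL };

static const struct of_id match[] = {
	{ "hisilicon,kirin960-pcie", &data_960 },
	{ "hisilicon,kirin970-pcie", &data_970 },
	{ NULL, NULL },
};

/* Stand-in for of_device_get_match_data(): resolve .data by compatible. */
static const void *get_match_data(const char *compat)
{
	const struct of_id *id;

	for (id = match; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct pcie_data *d = get_match_data("hisilicon,kirin960-pcie");

	/* The pointer is NULL only when nothing matched, even though the
	 * phy_type it carries is 0. */
	printf("phy_type = %d\n", d ? (int)d->phy_type : -1);
	return 0;
}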
static int kirin_pcie_probe(struct platform_device *pdev) return -EINVAL; } - phy_type = (long)of_device_get_match_data(dev); - if (!phy_type) { + data = of_device_get_match_data(dev); + if (!data) { dev_err(dev, "OF data missing\n"); return -EINVAL; } - kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); if (!kirin_pcie) return -ENOMEM; @@ -800,7 +802,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) pci->ops = &kirin_dw_pcie_ops; pci->pp.ops = &kirin_pcie_host_ops; kirin_pcie->pci = pci; - kirin_pcie->type = phy_type; + kirin_pcie->type = data->phy_type; ret = kirin_pcie_get_resource(kirin_pcie, pdev); if (ret) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 6ce8eddf3a37..6d0d1b759ca2 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -10,6 +10,7 @@ */ #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/mfd/syscon.h> @@ -26,6 +27,7 @@ #define PARF_SYS_CTRL 0x00 #define PARF_DB_CTRL 0x10 #define PARF_PM_CTRL 0x20 +#define PARF_MHI_CLOCK_RESET_CTRL 0x174 #define PARF_MHI_BASE_ADDR_LOWER 0x178 #define PARF_MHI_BASE_ADDR_UPPER 0x17c #define PARF_DEBUG_INT_EN 0x190 @@ -45,6 +47,11 @@ #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 #define PARF_SRIS_MODE 0x644 +#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04 +#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c +#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10 +#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84 +#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_CFG 0x2c00 @@ -83,6 +90,9 @@ #define PARF_PM_CTRL_READY_ENTR_L23 BIT(2) #define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5) +/* PARF_MHI_CLOCK_RESET_CTRL fields */ +#define PARF_MSTR_AXI_CLK_EN BIT(1) + /* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */ #define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0) @@ -95,6 +105,7 @@ /* PARF_SYS_CTRL register fields */ #define PARF_SYS_CTRL_AUX_PWR_DET BIT(4) #define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6) +#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10) #define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11) /* PARF_DB_CTRL register fields */ @@ -130,21 +141,33 @@ enum qcom_pcie_ep_link_status { QCOM_PCIE_EP_LINK_DOWN, }; -static struct clk_bulk_data qcom_pcie_ep_clks[] = { - { .id = "cfg" }, - { .id = "aux" }, - { .id = "bus_master" }, - { .id = "bus_slave" }, - { .id = "ref" }, - { .id = "sleep" }, - { .id = "slave_q2a" }, -}; - +/** + * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller + * @pci: Designware PCIe controller struct + * @parf: Qualcomm PCIe specific PARF register base + * @elbi: Designware PCIe specific ELBI register base + * @mmio: MMIO register base + * @perst_map: PERST regmap + * @mmio_res: MMIO region resource + * @core_reset: PCIe Endpoint core reset + * @reset: PERST# GPIO + * @wake: WAKE# GPIO + * @phy: PHY controller block + * @debugfs: PCIe Endpoint Debugfs directory + * @clks: PCIe clocks + * @num_clks: PCIe clocks count + * @perst_en: Flag for PERST enable + * @perst_sep_en: Flag for PERST separation enable + * @link_status: PCIe Link status + * @global_irq: Qualcomm PCIe specific Global IRQ + * @perst_irq: PERST# IRQ + */ struct qcom_pcie_ep { struct dw_pcie pci; void __iomem *parf; void __iomem *elbi; + void __iomem *mmio; struct regmap *perst_map; struct resource *mmio_res; @@ -152,6 +175,10 @@ struct qcom_pcie_ep { struct gpio_desc *reset; struct 
gpio_desc *wake; struct phy *phy; + struct dentry *debugfs; + + struct clk_bulk_data *clks; + int num_clks; u32 perst_en; u32 perst_sep_en; @@ -193,8 +220,10 @@ static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep) */ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep) { - regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0); - regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0); + if (pcie_ep->perst_map) { + regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0); + regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0); + } } static int qcom_pcie_dw_link_up(struct dw_pcie *pci) @@ -223,15 +252,11 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci) disable_irq(pcie_ep->perst_irq); } -static int qcom_pcie_perst_deassert(struct dw_pcie *pci) +static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep) { - struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); - struct device *dev = pci->dev; - u32 val, offset; int ret; - ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); + ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks); if (ret) return ret; @@ -247,6 +272,36 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) if (ret) goto err_phy_exit; + return 0; + +err_phy_exit: + phy_exit(pcie_ep->phy); +err_disable_clk: + clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks); + + return ret; +} + +static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep) +{ + phy_power_off(pcie_ep->phy); + phy_exit(pcie_ep->phy); + clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks); +} + +static int qcom_pcie_perst_deassert(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + struct device *dev = pci->dev; + u32 val, offset; + int ret; + + ret = qcom_pcie_enable_resources(pcie_ep); + if (ret) { + dev_err(dev, "Failed to enable resources: %d\n", ret); + return ret; + } + /* Assert WAKE# to RC to indicate device is ready */ gpiod_set_value_cansleep(pcie_ep->wake, 1); usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500); @@ -289,8 +344,14 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) val &= ~PARF_Q2A_FLUSH_EN; writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH); - /* Disable DBI Wakeup, core clock CGC and enable AUX power */ + /* + * Disable Master AXI clock during idle. Do not allow DBI access + * to take the core out of L1. Disable core clock gating that + * gates PIPE clock from propagating to core clock. Report to the + * host that Vaux is present. 
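The write that follows this comment is a plain read-modify-write of PARF_SYS_CTRL: clear PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS so the master AXI clock may be gated when idle, then set the DBI-wake-disable, core-clock-gating-disable and aux-power-detect bits in one store. A runnable sketch of the same sequence on an ordinary variable (bit positions are the BIT() values from the defines added earlier in this patch; the starting value is invented):

#include <stdint.h>
#include <stdio.h>

#define MSTR_ACLK_CGC_DIS	(1u << 10)	/* PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS */
#define SLV_DBI_WAKE_DISABLE	(1u << 11)	/* PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE */
#define CORE_CLK_CGC_DIS	(1u << 6)	/* PARF_SYS_CTRL_CORE_CLK_CGC_DIS */
#define AUX_PWR_DET		(1u << 4)	/* PARF_SYS_CTRL_AUX_PWR_DET */

int main(void)
{
	uint32_t val = 0xffffffff;	/* stand-in for readl_relaxed() */

	val &= ~MSTR_ACLK_CGC_DIS;	/* allow master AXI clock gating */
	val |= SLV_DBI_WAKE_DISABLE |	/* DBI access may not exit L1 */
	       CORE_CLK_CGC_DIS |	/* keep core clock ungated from PIPE */
	       AUX_PWR_DET;		/* report Vaux present to the host */

	printf("PARF_SYS_CTRL <- 0x%08x\n", val);	/* then writel_relaxed() */
	return 0;
}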
+ */ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL); + val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS; val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE | PARF_SYS_CTRL_CORE_CLK_CGC_DIS | PARF_SYS_CTRL_AUX_PWR_DET; @@ -335,7 +396,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to complete initialization: %d\n", ret); - goto err_phy_power_off; + goto err_disable_resources; } /* @@ -346,6 +407,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER); writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER); + /* Gate Master AXI clock to MHI bus during L1SS */ + val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL); + val &= ~PARF_MSTR_AXI_CLK_EN; + writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL); + dw_pcie_ep_init_notify(&pcie_ep->pci.ep); /* Enable LTSSM */ @@ -355,13 +421,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) return 0; -err_phy_power_off: - phy_power_off(pcie_ep->phy); -err_phy_exit: - phy_exit(pcie_ep->phy); -err_disable_clk: - clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); +err_disable_resources: + qcom_pcie_disable_resources(pcie_ep); return ret; } @@ -376,10 +437,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci) return; } - phy_power_off(pcie_ep->phy); - phy_exit(pcie_ep->phy); - clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); + qcom_pcie_disable_resources(pcie_ep); pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED; } @@ -416,11 +474,19 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev, pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); + if (!pcie_ep->mmio_res) { + dev_err(dev, "Failed to get mmio resource\n"); + return -EINVAL; + } + + pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res); + if (IS_ERR(pcie_ep->mmio)) + return PTR_ERR(pcie_ep->mmio); syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0); if (!syscon) { - dev_err(dev, "Failed to parse qcom,perst-regs\n"); - return -EINVAL; + dev_dbg(dev, "PERST separation not available\n"); + return 0; } pcie_ep->perst_map = syscon_node_to_regmap(syscon); @@ -453,14 +519,15 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev, ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep); if (ret) { - dev_err(&pdev->dev, "Failed to get io resources %d\n", ret); + dev_err(dev, "Failed to get io resources %d\n", ret); return ret; } - ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); - if (ret) - return ret; + pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks); + if (pcie_ep->num_clks < 0) { + dev_err(dev, "Failed to get clocks\n"); + return pcie_ep->num_clks; + } pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core"); if (IS_ERR(pcie_ep->core_reset)) @@ -474,7 +541,7 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev, if (IS_ERR(pcie_ep->wake)) return PTR_ERR(pcie_ep->wake); - pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy"); + pcie_ep->phy = devm_phy_optional_get(dev, "pciephy"); if (IS_ERR(pcie_ep->phy)) ret = PTR_ERR(pcie_ep->phy); @@ -550,13 +617,13 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data) static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, struct qcom_pcie_ep *pcie_ep) { - int irq, ret; + int ret; - irq = platform_get_irq_byname(pdev, "global"); - if (irq < 
0) - return irq; + pcie_ep->global_irq = platform_get_irq_byname(pdev, "global"); + if (pcie_ep->global_irq < 0) + return pcie_ep->global_irq; - ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL, qcom_pcie_ep_global_irq_thread, IRQF_ONESHOT, "global_irq", pcie_ep); @@ -573,7 +640,7 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, "perst_irq", pcie_ep); if (ret) { dev_err(&pdev->dev, "Failed to request PERST IRQ\n"); - disable_irq(irq); + disable_irq(pcie_ep->global_irq); return ret; } @@ -596,6 +663,37 @@ static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, } } +static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data) +{ + struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *) + dev_get_drvdata(s->private); + + seq_printf(s, "L0s transition count: %u\n", + readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S)); + + seq_printf(s, "L1 transition count: %u\n", + readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1)); + + seq_printf(s, "L1.1 transition count: %u\n", + readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1)); + + seq_printf(s, "L1.2 transition count: %u\n", + readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2)); + + seq_printf(s, "L2 transition count: %u\n", + readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2)); + + return 0; +} + +static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep) +{ + struct dw_pcie *pci = &pcie_ep->pci; + + debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs, + qcom_pcie_ep_link_transition_count); +} + static const struct pci_epc_features qcom_pcie_epc_features = { .linkup_notifier = true, .core_init_notifier = true, @@ -628,6 +726,7 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct qcom_pcie_ep *pcie_ep; + char *name; int ret; pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL); @@ -643,43 +742,39 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) if (ret) return ret; - ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); - if (ret) + ret = qcom_pcie_enable_resources(pcie_ep); + if (ret) { + dev_err(dev, "Failed to enable resources: %d\n", ret); return ret; - - ret = qcom_pcie_ep_core_reset(pcie_ep); - if (ret) - goto err_disable_clk; - - ret = phy_init(pcie_ep->phy); - if (ret) - goto err_disable_clk; - - /* PHY needs to be powered on for dw_pcie_ep_init() */ - ret = phy_power_on(pcie_ep->phy); - if (ret) - goto err_phy_exit; + } ret = dw_pcie_ep_init(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to initialize endpoint: %d\n", ret); - goto err_phy_power_off; + goto err_disable_resources; } ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep); if (ret) - goto err_phy_power_off; + goto err_disable_resources; + + name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!name) { + ret = -ENOMEM; + goto err_disable_irqs; + } + + pcie_ep->debugfs = debugfs_create_dir(name, NULL); + qcom_pcie_ep_init_debugfs(pcie_ep); return 0; -err_phy_power_off: - phy_power_off(pcie_ep->phy); -err_phy_exit: - phy_exit(pcie_ep->phy); -err_disable_clk: - clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); +err_disable_irqs: + disable_irq(pcie_ep->global_irq); + disable_irq(pcie_ep->perst_irq); + +err_disable_resources: + qcom_pcie_disable_resources(pcie_ep); return ret; } @@ -688,21 +783,25 @@ 
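qcom_pcie_ep_link_transition_count() above only reads five PARF debug counters out of the controller's "mmio" window and prints one line per low-power state; debugfs_create_devm_seqfile() then exposes it under the per-device directory created in probe. A user-space approximation of the readout (the mmio[] array and rd() helper fake the ioremapped window that the driver accesses with readl_relaxed(); the offsets are the PARF_DEBUG_CNT_* values added by this patch):

#include <stdint.h>
#include <stdio.h>

#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

static uint32_t mmio[0x1000 / 4];	/* fake MMIO window */

static uint32_t rd(uint32_t off)
{
	return mmio[off / 4];
}

int main(void)
{
	mmio[PARF_DEBUG_CNT_PM_LINKST_IN_L1 / 4] = 42;	/* fake hardware */

	printf("L0s transition count: %u\n", rd(PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
	printf("L1 transition count: %u\n", rd(PARF_DEBUG_CNT_PM_LINKST_IN_L1));
	printf("L1.1 transition count: %u\n", rd(PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
	printf("L1.2 transition count: %u\n", rd(PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
	printf("L2 transition count: %u\n", rd(PARF_DEBUG_CNT_PM_LINKST_IN_L2));
	return 0;
}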
static int qcom_pcie_ep_remove(struct platform_device *pdev) { struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev); + disable_irq(pcie_ep->global_irq); + disable_irq(pcie_ep->perst_irq); + + debugfs_remove_recursive(pcie_ep->debugfs); + if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) return 0; - phy_power_off(pcie_ep->phy); - phy_exit(pcie_ep->phy); - clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), - qcom_pcie_ep_clks); + qcom_pcie_disable_resources(pcie_ep); return 0; } static const struct of_device_id qcom_pcie_ep_match[] = { { .compatible = "qcom,sdx55-pcie-ep", }, + { .compatible = "qcom,sm8450-pcie-ep", }, { } }; +MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match); static struct platform_driver qcom_pcie_ep_driver = { .probe = qcom_pcie_ep_probe, diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index c19cd506ed3f..f711acacaeaf 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -41,6 +41,9 @@ #define L23_CLK_RMV_DIS BIT(2) #define L1_CLK_RMV_DIS BIT(1) +#define PCIE20_PARF_PM_CTRL 0x20 +#define REQ_NOT_ENTR_L1 BIT(5) + #define PCIE20_PARF_PHY_CTRL 0x40 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) @@ -52,6 +55,10 @@ #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 +#define AHB_CLK_EN BIT(0) +#define MSTR_AXI_CLK_EN BIT(1) +#define BYPASS BIT(4) + #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 #define PCIE20_PARF_LTSSM 0x1B0 @@ -69,7 +76,20 @@ #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c #define CFG_BRIDGE_SB_INIT BIT(0) -#define PCIE_CAP_LINK1_VAL 0x2FD7F +#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \ + 250) +#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \ + 1) +#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \ + PCI_EXP_SLTCAP_PCP | \ + PCI_EXP_SLTCAP_MRLSP | \ + PCI_EXP_SLTCAP_AIP | \ + PCI_EXP_SLTCAP_PIP | \ + PCI_EXP_SLTCAP_HPS | \ + PCI_EXP_SLTCAP_HPC | \ + PCI_EXP_SLTCAP_EIP | \ + PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ + PCIE_CAP_SLOT_POWER_LIMIT_SCALE) #define PCIE20_PARF_Q2A_FLUSH 0x1AC @@ -128,7 +148,6 @@ struct qcom_pcie_resources_2_3_2 { struct clk *master_clk; struct clk *slave_clk; struct clk *cfg_clk; - struct clk *pipe_clk; struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; }; @@ -161,14 +180,15 @@ struct qcom_pcie_resources_2_3_3 { /* 6 clocks typically, 7 for sm8250 */ struct qcom_pcie_resources_2_7_0 { - struct clk_bulk_data clks[7]; + struct clk_bulk_data clks[12]; int num_clks; struct regulator_bulk_data supplies[2]; struct reset_control *pci_reset; - struct clk *pipe_clk; - struct clk *pipe_clk_src; - struct clk *phy_pipe_clk; - struct clk *ref_clk_src; +}; + +struct qcom_pcie_resources_2_9_0 { + struct clk_bulk_data clks[5]; + struct reset_control *rst; }; union qcom_pcie_resources { @@ -178,6 +198,7 @@ union qcom_pcie_resources { struct qcom_pcie_resources_2_3_3 v2_3_3; struct qcom_pcie_resources_2_4_0 v2_4_0; struct qcom_pcie_resources_2_7_0 v2_7_0; + struct qcom_pcie_resources_2_9_0 v2_9_0; }; struct qcom_pcie; @@ -187,14 +208,12 @@ struct qcom_pcie_ops { int (*init)(struct qcom_pcie *pcie); int (*post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); - void (*post_deinit)(struct qcom_pcie *pcie); void (*ltssm_enable)(struct qcom_pcie *pcie); int (*config_sid)(struct 
qcom_pcie *pcie); }; struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; - unsigned int pipe_clk_need_muxing:1; }; struct qcom_pcie { @@ -204,8 +223,7 @@ struct qcom_pcie { union qcom_pcie_resources res; struct phy *phy; struct gpio_desc *reset; - const struct qcom_pcie_ops *ops; - unsigned int pipe_clk_need_muxing:1; + const struct qcom_pcie_cfg *cfg; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) @@ -229,8 +247,8 @@ static int qcom_pcie_start_link(struct dw_pcie *pci) struct qcom_pcie *pcie = to_qcom_pcie(pci); /* Enable Link Training state machine */ - if (pcie->ops->ltssm_enable) - pcie->ops->ltssm_enable(pcie); + if (pcie->cfg->ops->ltssm_enable) + pcie->cfg->ops->ltssm_enable(pcie); return 0; } @@ -322,8 +340,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; - struct device_node *node = dev->of_node; - u32 val; int ret; /* reset the PCIe interface as uboot can leave it undefined state */ @@ -334,8 +350,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) reset_control_assert(res->ext_reset); reset_control_assert(res->phy_reset); - writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); - ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators\n"); @@ -378,15 +392,42 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) goto err_deassert_axi; } - ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); - if (ret) - goto err_clks; + return 0; + +err_deassert_axi: + reset_control_assert(res->por_reset); +err_deassert_por: + reset_control_assert(res->pci_reset); +err_deassert_pci: + reset_control_assert(res->phy_reset); +err_deassert_phy: + reset_control_assert(res->ext_reset); +err_deassert_ext: + reset_control_assert(res->ahb_reset); +err_deassert_ahb: + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + struct device_node *node = dev->of_node; + u32 val; + int ret; /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); + if (ret) + return ret; + if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | @@ -425,23 +466,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); return 0; - -err_clks: - reset_control_assert(res->axi_reset); -err_deassert_axi: - reset_control_assert(res->por_reset); -err_deassert_por: - reset_control_assert(res->pci_reset); -err_deassert_pci: - reset_control_assert(res->phy_reset); -err_deassert_phy: - reset_control_assert(res->ext_reset); -err_deassert_ext: - reset_control_assert(res->ahb_reset); -err_deassert_ahb: - regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - - return ret; } static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) @@ -529,16 +553,6 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) goto err_slave; } - /* change DBI base address */ - writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - - if (IS_ENABLED(CONFIG_PCI_MSI)) { - u32 val = readl(pcie->parf 
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); - - val |= BIT(31); - writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); - } - return 0; err_slave: clk_disable_unprepare(res->slave_bus); @@ -554,6 +568,21 @@ err_res: return ret; } +static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) +{ + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + + val |= BIT(31); + writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + } + + return 0; +} + static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -594,8 +623,7 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) if (IS_ERR(res->slave_clk)) return PTR_ERR(res->slave_clk); - res->pipe_clk = devm_clk_get(dev, "pipe"); - return PTR_ERR_OR_ZERO(res->pipe_clk); + return 0; } static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) @@ -610,19 +638,11 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } -static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - - clk_disable_unprepare(res->pipe_clk); -} - static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; - u32 val; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); @@ -655,6 +675,25 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) goto err_slave_clk; } + return 0; + +err_slave_clk: + clk_disable_unprepare(res->master_clk); +err_master_clk: + clk_disable_unprepare(res->cfg_clk); +err_cfg_clk: + clk_disable_unprepare(res->aux_clk); + +err_aux_clk: + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + + return ret; +} + +static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) +{ + u32 val; + /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); @@ -677,34 +716,6 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); return 0; - -err_slave_clk: - clk_disable_unprepare(res->master_clk); -err_master_clk: - clk_disable_unprepare(res->cfg_clk); -err_cfg_clk: - clk_disable_unprepare(res->aux_clk); - -err_aux_clk: - regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - - return ret; -} - -static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) -{ - struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - struct dw_pcie *pci = pcie->pci; - struct device *dev = pci->dev; - int ret; - - ret = clk_prepare_enable(res->pipe_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable pipe clock\n"); - return ret; - } - - return 0; } static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) @@ -811,7 +822,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; - u32 val; int ret; ret = reset_control_assert(res->axi_m_reset); @@ -936,6 +946,33 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) if (ret) goto err_clks; + return 0; + +err_clks: + reset_control_assert(res->ahb_reset); +err_rst_ahb: + reset_control_assert(res->pwr_reset); +err_rst_pwr: + reset_control_assert(res->axi_s_reset); +err_rst_axi_s: + 
reset_control_assert(res->axi_m_sticky_reset); +err_rst_axi_m_sticky: + reset_control_assert(res->axi_m_reset); +err_rst_axi_m: + reset_control_assert(res->pipe_sticky_reset); +err_rst_pipe_sticky: + reset_control_assert(res->pipe_reset); +err_rst_pipe: + reset_control_assert(res->phy_reset); +err_rst_phy: + reset_control_assert(res->phy_ahb_reset); + return ret; +} + +static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) +{ + u32 val; + /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); val &= ~BIT(0); @@ -958,26 +995,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); return 0; - -err_clks: - reset_control_assert(res->ahb_reset); -err_rst_ahb: - reset_control_assert(res->pwr_reset); -err_rst_pwr: - reset_control_assert(res->axi_s_reset); -err_rst_axi_s: - reset_control_assert(res->axi_m_sticky_reset); -err_rst_axi_m_sticky: - reset_control_assert(res->axi_m_reset); -err_rst_axi_m: - reset_control_assert(res->pipe_sticky_reset); -err_rst_pipe_sticky: - reset_control_assert(res->pipe_reset); -err_rst_pipe: - reset_control_assert(res->phy_reset); -err_rst_phy: - reset_control_assert(res->phy_ahb_reset); - return ret; } static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) @@ -1035,9 +1052,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; - u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); int i, ret; - u32 val; for (i = 0; i < ARRAY_SIZE(res->rst); i++) { ret = reset_control_assert(res->rst[i]); @@ -1094,6 +1109,33 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) goto err_clk_aux; } + return 0; + +err_clk_aux: + clk_disable_unprepare(res->ahb_clk); +err_clk_ahb: + clk_disable_unprepare(res->axi_s_clk); +err_clk_axi_s: + clk_disable_unprepare(res->axi_m_clk); +err_clk_axi_m: + clk_disable_unprepare(res->iface); +err_clk_iface: + /* + * Not checking for failure, will anyway return + * the original failure in 'ret'. + */ + for (i = 0; i < ARRAY_SIZE(res->rst); i++) + reset_control_assert(res->rst[i]); + + return ret; +} + +static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + u32 val; + writel(SLV_ADDR_SPACE_SZ, pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); @@ -1111,7 +1153,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); - writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); + writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); val &= ~PCI_EXP_LNKCAP_ASPMS; @@ -1121,24 +1163,6 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) PCI_EXP_DEVCTL2); return 0; - -err_clk_aux: - clk_disable_unprepare(res->ahb_clk); -err_clk_ahb: - clk_disable_unprepare(res->axi_s_clk); -err_clk_axi_s: - clk_disable_unprepare(res->axi_m_clk); -err_clk_axi_m: - clk_disable_unprepare(res->iface); -err_clk_iface: - /* - * Not checking for failure, will anyway return - * the original failure in 'ret'. 
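The error labels shuffled around in these qcom init routines keep the usual kernel unwind idiom intact after the init()/post_init() split: resources are acquired top to bottom and each goto target releases only what was acquired before the failure, in reverse order. A stand-alone illustration with stub functions (none of these names come from the driver):

#include <stdio.h>

/* Stubs standing in for regulator/reset/clock calls; 0 means success. */
static int enable_regulators(void) { return 0; }
static int deassert_resets(void)   { return 0; }
static int enable_clocks(void)     { return -1; }	/* force a failure */

static void assert_resets(void)      { puts("resets asserted"); }
static void disable_regulators(void) { puts("regulators off"); }

static int init(void)
{
	int ret;

	ret = enable_regulators();
	if (ret)
		return ret;

	ret = deassert_resets();
	if (ret)
		goto err_disable_regulators;

	ret = enable_clocks();
	if (ret)
		goto err_assert_resets;

	return 0;

	/* Unwind strictly in reverse acquisition order. */
err_assert_resets:
	assert_resets();
err_disable_regulators:
	disable_regulators();
	return ret;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}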
- */ - for (i = 0; i < ARRAY_SIZE(res->rst); i++) - reset_control_assert(res->rst[i]); - - return ret; } static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) @@ -1146,6 +1170,8 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + unsigned int num_clks, num_opt_clks; + unsigned int idx; int ret; res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); @@ -1159,39 +1185,35 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) if (ret) return ret; - res->clks[0].id = "aux"; - res->clks[1].id = "cfg"; - res->clks[2].id = "bus_master"; - res->clks[3].id = "bus_slave"; - res->clks[4].id = "slave_q2a"; - res->clks[5].id = "tbu"; - if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) { - res->clks[6].id = "ddrss_sf_tbu"; - res->num_clks = 7; - } else { - res->num_clks = 6; - } + idx = 0; + res->clks[idx++].id = "aux"; + res->clks[idx++].id = "cfg"; + res->clks[idx++].id = "bus_master"; + res->clks[idx++].id = "bus_slave"; + res->clks[idx++].id = "slave_q2a"; - ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); + num_clks = idx; + + ret = devm_clk_bulk_get(dev, num_clks, res->clks); if (ret < 0) return ret; - if (pcie->pipe_clk_need_muxing) { - res->pipe_clk_src = devm_clk_get(dev, "pipe_mux"); - if (IS_ERR(res->pipe_clk_src)) - return PTR_ERR(res->pipe_clk_src); + res->clks[idx++].id = "tbu"; + res->clks[idx++].id = "ddrss_sf_tbu"; + res->clks[idx++].id = "aggre0"; + res->clks[idx++].id = "aggre1"; + res->clks[idx++].id = "noc_aggr_4"; + res->clks[idx++].id = "noc_aggr_south_sf"; + res->clks[idx++].id = "cnoc_qx"; - res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe"); - if (IS_ERR(res->phy_pipe_clk)) - return PTR_ERR(res->phy_pipe_clk); + num_opt_clks = idx - num_clks; + res->num_clks = idx; - res->ref_clk_src = devm_clk_get(dev, "ref"); - if (IS_ERR(res->ref_clk_src)) - return PTR_ERR(res->ref_clk_src); - } + ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks); + if (ret < 0) + return ret; - res->pipe_clk = devm_clk_get(dev, "pipe"); - return PTR_ERR_OR_ZERO(res->pipe_clk); + return 0; } static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) @@ -1208,10 +1230,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) return ret; } - /* Set TCXO as clock source for pcie_pipe_clk_src */ - if (pcie->pipe_clk_need_muxing) - clk_set_parent(res->pipe_clk_src, res->ref_clk_src); - ret = clk_bulk_prepare_enable(res->num_clks, res->clks); if (ret < 0) goto err_disable_regulators; @@ -1230,11 +1248,8 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) goto err_disable_clocks; } - ret = clk_prepare_enable(res->pipe_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable pipe clock\n"); - goto err_disable_clocks; - } + /* Wait for reset to complete, required on SM8450 */ + usleep_range(1000, 1500); /* configure PCIe to RC mode */ writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); @@ -1256,6 +1271,11 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) val |= BIT(4); writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + /* Enable L1 and L1SS */ + val = readl(pcie->parf + PCIE20_PARF_PM_CTRL); + val &= ~REQ_NOT_ENTR_L1; + writel(val, pcie->parf + PCIE20_PARF_PM_CTRL); + if (IS_ENABLED(CONFIG_PCI_MSI)) { val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); val |= BIT(31); @@ -1276,25 +1296,114 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) struct 
qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; clk_bulk_disable_unprepare(res->num_clks, res->clks); + regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } -static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; + struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + int ret; + + res->clks[0].id = "iface"; + res->clks[1].id = "axi_m"; + res->clks[2].id = "axi_s"; + res->clks[3].id = "axi_bridge"; + res->clks[4].id = "rchng"; + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); + if (ret < 0) + return ret; - /* Set pipe clock as clock source for pcie_pipe_clk_src */ - if (pcie->pipe_clk_need_muxing) - clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk); + res->rst = devm_reset_control_array_get_exclusive(dev); + if (IS_ERR(res->rst)) + return PTR_ERR(res->rst); - return clk_prepare_enable(res->pipe_clk); + return 0; } -static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) { - struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; + struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + + clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +} + +static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + struct device *dev = pcie->pci->dev; + int ret; + + ret = reset_control_assert(res->rst); + if (ret) { + dev_err(dev, "reset assert failed (%d)\n", ret); + return ret; + } + + /* + * Delay periods before and after reset deassert are working values + * from downstream Codeaurora kernel + */ + usleep_range(2000, 2500); + + ret = reset_control_deassert(res->rst); + if (ret) { + dev_err(dev, "reset deassert failed (%d)\n", ret); + return ret; + } + + usleep_range(2000, 2500); + + return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +} + +static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) +{ + struct dw_pcie *pci = pcie->pci; + u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + u32 val; + int i; + + writel(SLV_ADDR_SPACE_SZ, + pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); + + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); + writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, + pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); + writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | + GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, + pci->dbi_base + GEN3_RELATED_OFF); + + writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | + SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | + AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, + pcie->parf + PCIE20_PARF_SYS_CTRL); + + writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + + dw_pcie_dbi_ro_wr_en(pci); + writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); + + val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_ASPMS; + writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); + + writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + + PCI_EXP_DEVCTL2); + + for (i = 0; i < 256; i++) + writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i)); - clk_disable_unprepare(res->pipe_clk); + return 0; } static int 
qcom_pcie_link_up(struct dw_pcie *pci) @@ -1376,7 +1485,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_host_init(struct pcie_port *pp) +static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); @@ -1384,7 +1493,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp) qcom_ep_reset_assert(pcie); - ret = pcie->ops->init(pcie); + ret = pcie->cfg->ops->init(pcie); if (ret) return ret; @@ -1392,30 +1501,28 @@ static int qcom_pcie_host_init(struct pcie_port *pp) if (ret) goto err_deinit; - if (pcie->ops->post_init) { - ret = pcie->ops->post_init(pcie); + if (pcie->cfg->ops->post_init) { + ret = pcie->cfg->ops->post_init(pcie); if (ret) goto err_disable_phy; } qcom_ep_reset_deassert(pcie); - if (pcie->ops->config_sid) { - ret = pcie->ops->config_sid(pcie); + if (pcie->cfg->ops->config_sid) { + ret = pcie->cfg->ops->config_sid(pcie); if (ret) - goto err; + goto err_assert_reset; } return 0; -err: +err_assert_reset: qcom_ep_reset_assert(pcie); - if (pcie->ops->post_deinit) - pcie->ops->post_deinit(pcie); err_disable_phy: phy_power_off(pcie->phy); err_deinit: - pcie->ops->deinit(pcie); + pcie->cfg->ops->deinit(pcie); return ret; } @@ -1428,6 +1535,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { static const struct qcom_pcie_ops ops_2_1_0 = { .get_resources = qcom_pcie_get_resources_2_1_0, .init = qcom_pcie_init_2_1_0, + .post_init = qcom_pcie_post_init_2_1_0, .deinit = qcom_pcie_deinit_2_1_0, .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; @@ -1436,6 +1544,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = { static const struct qcom_pcie_ops ops_1_0_0 = { .get_resources = qcom_pcie_get_resources_1_0_0, .init = qcom_pcie_init_1_0_0, + .post_init = qcom_pcie_post_init_1_0_0, .deinit = qcom_pcie_deinit_1_0_0, .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, }; @@ -1446,7 +1555,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = { .init = qcom_pcie_init_2_3_2, .post_init = qcom_pcie_post_init_2_3_2, .deinit = qcom_pcie_deinit_2_3_2, - .post_deinit = qcom_pcie_post_deinit_2_3_2, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1454,6 +1562,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = { static const struct qcom_pcie_ops ops_2_4_0 = { .get_resources = qcom_pcie_get_resources_2_4_0, .init = qcom_pcie_init_2_4_0, + .post_init = qcom_pcie_post_init_2_4_0, .deinit = qcom_pcie_deinit_2_4_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1462,6 +1571,7 @@ static const struct qcom_pcie_ops ops_2_4_0 = { static const struct qcom_pcie_ops ops_2_3_3 = { .get_resources = qcom_pcie_get_resources_2_3_3, .init = qcom_pcie_init_2_3_3, + .post_init = qcom_pcie_post_init_2_3_3, .deinit = qcom_pcie_deinit_2_3_3, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1472,8 +1582,6 @@ static const struct qcom_pcie_ops ops_2_7_0 = { .init = qcom_pcie_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, - .post_init = qcom_pcie_post_init_2_7_0, - .post_deinit = qcom_pcie_post_deinit_2_7_0, }; /* Qcom IP rev.: 1.9.0 */ @@ -1482,42 +1590,48 @@ static const struct qcom_pcie_ops ops_1_9_0 = { .init = qcom_pcie_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, - .post_init = qcom_pcie_post_init_2_7_0, - .post_deinit = qcom_pcie_post_deinit_2_7_0, .config_sid = qcom_pcie_config_sid_sm8250, }; -static const struct qcom_pcie_cfg apq8084_cfg = { +/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 
5.00a */ +static const struct qcom_pcie_ops ops_2_9_0 = { + .get_resources = qcom_pcie_get_resources_2_9_0, + .init = qcom_pcie_init_2_9_0, + .post_init = qcom_pcie_post_init_2_9_0, + .deinit = qcom_pcie_deinit_2_9_0, + .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; + +static const struct qcom_pcie_cfg cfg_1_0_0 = { .ops = &ops_1_0_0, }; -static const struct qcom_pcie_cfg ipq8064_cfg = { +static const struct qcom_pcie_cfg cfg_1_9_0 = { + .ops = &ops_1_9_0, +}; + +static const struct qcom_pcie_cfg cfg_2_1_0 = { .ops = &ops_2_1_0, }; -static const struct qcom_pcie_cfg msm8996_cfg = { +static const struct qcom_pcie_cfg cfg_2_3_2 = { .ops = &ops_2_3_2, }; -static const struct qcom_pcie_cfg ipq8074_cfg = { +static const struct qcom_pcie_cfg cfg_2_3_3 = { .ops = &ops_2_3_3, }; -static const struct qcom_pcie_cfg ipq4019_cfg = { +static const struct qcom_pcie_cfg cfg_2_4_0 = { .ops = &ops_2_4_0, }; -static const struct qcom_pcie_cfg sdm845_cfg = { +static const struct qcom_pcie_cfg cfg_2_7_0 = { .ops = &ops_2_7_0, }; -static const struct qcom_pcie_cfg sm8250_cfg = { - .ops = &ops_1_9_0, -}; - -static const struct qcom_pcie_cfg sc7280_cfg = { - .ops = &ops_1_9_0, - .pipe_clk_need_muxing = true, +static const struct qcom_pcie_cfg cfg_2_9_0 = { + .ops = &ops_2_9_0, }; static const struct dw_pcie_ops dw_pcie_ops = { @@ -1528,7 +1642,7 @@ static const struct dw_pcie_ops dw_pcie_ops = { static int qcom_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pcie_port *pp; + struct dw_pcie_rp *pp; struct dw_pcie *pci; struct qcom_pcie *pcie; const struct qcom_pcie_cfg *pcie_cfg; @@ -1559,8 +1673,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->pci = pci; - pcie->ops = pcie_cfg->ops; - pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing; + pcie->cfg = pcie_cfg; pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); if (IS_ERR(pcie->reset)) { @@ -1586,29 +1699,28 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - ret = pcie->ops->get_resources(pcie); + ret = pcie->cfg->ops->get_resources(pcie); if (ret) goto err_pm_runtime_put; pp->ops = &qcom_pcie_dw_ops; ret = phy_init(pcie->phy); - if (ret) { - pm_runtime_disable(&pdev->dev); + if (ret) goto err_pm_runtime_put; - } platform_set_drvdata(pdev, pcie); ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "cannot initialize host\n"); - pm_runtime_disable(&pdev->dev); - goto err_pm_runtime_put; + goto err_phy_exit; } return 0; +err_phy_exit: + phy_exit(pcie->phy); err_pm_runtime_put: pm_runtime_put(dev); pm_runtime_disable(dev); @@ -1617,24 +1729,30 @@ err_pm_runtime_put: } static const struct of_device_id qcom_pcie_match[] = { - { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg }, - { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg }, - { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg }, - { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg }, - { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg }, - { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg }, - { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg }, - { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg }, - { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg }, - { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg }, - { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg }, - { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg }, + { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, + { .compatible = 
"qcom,pcie-apq8084", .data = &cfg_1_0_0 }, + { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, + { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, + { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, + { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, + { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, + { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, + { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, + { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, + { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, + { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, { } }; static void qcom_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 1569e82b5568..99d47ae80331 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) struct spear13xx_pcie *spear13xx_pcie = arg; struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; unsigned int status; status = readl(&app_reg->int_sts); @@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct dw_pcie *pci) return 0; } -static int spear13xx_pcie_host_init(struct pcie_port *pp) +static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); @@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, struct platform_device *pdev) { struct dw_pcie *pci = spear13xx_pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; struct device *dev = &pdev->dev; int ret; @@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, } pp->ops = &spear13xx_pcie_host_ops; - pp->msi_irq = -ENODEV; + pp->msi_irq[0] = -ENODEV; ret = dw_pcie_host_init(pp); if (ret) { @@ -258,7 +258,7 @@ static struct platform_driver spear13xx_pcie_driver = { .probe = spear13xx_pcie_probe, .driver = { .name = "spear-pcie", - .of_match_table = of_match_ptr(spear13xx_pcie_of_match), + .of_match_table = spear13xx_pcie_of_match, .suppress_bind_attrs = true, }, }; diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c index c2de6ed4d86f..55f61914a986 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c +++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c @@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg) static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index, u32 val, u32 reg) { - u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); + u32 offset = 
PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) + + PCIE_ATU_VIEWPORT_BASE; writel(val, pcie_ecam->iatu_base + offset + reg); } @@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam, PCIE_ATU_LIMIT); atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); - atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1); - atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1); + atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2); } static void __iomem *tegra194_map_bus(struct pci_bus *bus, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index b1b5f836a806..1b6b437823d2 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * PCIe host controller driver for Tegra194 SoC + * PCIe host controller driver for the following SoCs + * Tegra194 + * Tegra234 * - * Copyright (C) 2019 NVIDIA Corporation. + * Copyright (C) 2019-2022 NVIDIA Corporation. * * Author: Vidya Sagar <vidyas@nvidia.com> */ @@ -35,6 +37,9 @@ #include <soc/tegra/bpmp-abi.h> #include "../../pci.h" +#define TEGRA194_DWC_IP_VER 0x490A +#define TEGRA234_DWC_IP_VER 0x562A + #define APPL_PINMUX 0x0 #define APPL_PINMUX_PEX_RST BIT(0) #define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2) @@ -49,6 +54,7 @@ #define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0) #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1 +#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2 #define APPL_INTR_EN_L0_0 0x8 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) @@ -170,35 +176,14 @@ #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19) -#define EVENT_COUNTER_ALL_CLEAR 0x3 -#define EVENT_COUNTER_ENABLE_ALL 0x7 -#define EVENT_COUNTER_ENABLE_SHIFT 2 -#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0) -#define EVENT_COUNTER_EVENT_SEL_SHIFT 16 -#define EVENT_COUNTER_EVENT_Tx_L0S 0x2 -#define EVENT_COUNTER_EVENT_Rx_L0S 0x3 -#define EVENT_COUNTER_EVENT_L1 0x5 -#define EVENT_COUNTER_EVENT_L1_1 0x7 -#define EVENT_COUNTER_EVENT_L1_2 0x8 -#define EVENT_COUNTER_GROUP_SEL_SHIFT 24 -#define EVENT_COUNTER_GROUP_5 0x5 - #define N_FTS_VAL 52 #define FTS_VAL 52 -#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828 - #define GEN3_EQ_CONTROL_OFF 0x8a8 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8) #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0) -#define GEN3_RELATED_OFF 0x890 -#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0) -#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16) -#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24 -#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24) - #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0 #define AMBA_ERROR_RESPONSE_CRS_SHIFT 3 #define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0) @@ -245,7 +230,19 @@ static const unsigned int pcie_gen_freq[] = { GEN4_CORE_CLK_FREQ }; -struct tegra194_pcie { +struct tegra_pcie_dw_of_data { + u32 version; + enum dw_pcie_device_mode mode; + bool has_msix_doorbell_access_fix; + bool has_sbr_reset_fix; + bool has_l1ss_exit_fix; + bool has_ltr_req_fix; + u32 cdm_chk_int_en_bit; + u32 gen4_preset_vec; + u8 n_fts[2]; +}; + +struct tegra_pcie_dw { struct device *dev; struct resource *appl_res; struct resource *dbi_res; @@ -257,17 +254,20 @@ struct tegra194_pcie { struct 
dw_pcie pci; struct tegra_bpmp *bpmp; - enum dw_pcie_device_mode mode; + struct tegra_pcie_dw_of_data *of_data; bool supports_clkreq; bool enable_cdm_check; + bool enable_srns; bool link_state; bool update_fc_fixup; + bool enable_ext_refclk; u8 init_link_width; u32 msi_ctrl_int; u32 num_lanes; u32 cid; u32 cfg_link_cap_l1sub; + u32 ras_des_cap; u32 pcie_cap_base; u32 aspm_cmrt; u32 aspm_pwr_on_t; @@ -289,22 +289,18 @@ struct tegra194_pcie { int ep_state; }; -struct tegra194_pcie_of_data { - enum dw_pcie_device_mode mode; -}; - -static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci) +static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) { - return container_of(pci, struct tegra194_pcie, pci); + return container_of(pci, struct tegra_pcie_dw, pci); } -static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value, +static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value, const u32 reg) { writel_relaxed(value, pcie->appl_base + reg); } -static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg) +static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg) { return readl_relaxed(pcie->appl_base + reg); } @@ -313,10 +309,10 @@ struct tegra_pcie_soc { enum dw_pcie_device_mode mode; }; -static void apply_bad_link_workaround(struct pcie_port *pp) +static void apply_bad_link_workaround(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 current_link_width; u16 val; @@ -349,18 +345,18 @@ static void apply_bad_link_workaround(struct pcie_port *pp) static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) { - struct tegra194_pcie *pcie = arg; + struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; - struct pcie_port *pp = &pci->pp; - u32 val, tmp; + struct dw_pcie_rp *pp = &pci->pp; + u32 val, status_l0, status_l1; u16 val_w; - val = appl_readl(pcie, APPL_INTR_STATUS_L0); - if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); - if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { - appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); - + status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); + if (!pcie->of_data->has_sbr_reset_fix && + status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { /* SBR & Surprise Link Down WAR */ val = appl_readl(pcie, APPL_CAR_RESET_OVRD); val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; @@ -376,15 +372,21 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) } } - if (val & APPL_INTR_STATUS_L0_INT_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); - if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { + if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); + if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { appl_writel(pcie, APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS, APPL_INTR_STATUS_L1_8_0); apply_bad_link_workaround(pp); } - if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { + if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { + val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + val_w |= PCI_EXP_LNKSTA_LBMS; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA, val_w); + appl_writel(pcie, 
APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS, APPL_INTR_STATUS_L1_8_0); @@ -396,31 +398,30 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) } } - val = appl_readl(pcie, APPL_INTR_STATUS_L0); - if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { - val = appl_readl(pcie, APPL_INTR_STATUS_L1_18); - tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { + if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { + status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18); + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { dev_info(pci->dev, "CDM check complete\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; + val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; } - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { dev_err(pci->dev, "CDM comparison mismatch\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; + val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; } - if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { + if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { dev_err(pci->dev, "CDM Logic error\n"); - tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; + val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; } - dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp); - tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); - dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp); + dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); + val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); + dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val); } return IRQ_HANDLED; } -static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie) +static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) { u32 val; @@ -448,7 +449,7 @@ static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie) static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) { - struct tegra194_pcie *pcie = arg; + struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; u32 val, speed; @@ -456,6 +457,9 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) PCI_EXP_LNKSTA_CLS; clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + if (pcie->of_data->has_ltr_req_fix) + return IRQ_HANDLED; + /* If EP doesn't advertise L1SS, just return */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) @@ -494,7 +498,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) { - struct tegra194_pcie *pcie = arg; + struct tegra_pcie_dw *pcie = arg; struct dw_pcie_ep *ep = &pcie->pci.ep; int spurious = 1; u32 status_l0, status_l1, link_status; @@ -537,16 +541,21 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { + struct dw_pcie_rp *pp = bus->sysdata; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + /* * This is an endpoint mode specific register happen to appear even * when controller is operating in root port mode and system hangs * when it is accessed with link being in ASPM-L1 state. 
* So skip accessing it altogether */ - if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) { + if (!pcie->of_data->has_msix_doorbell_access_fix && + !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) { *val = 0x00000000; return PCIBIOS_SUCCESSFUL; } @@ -554,16 +563,21 @@ static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, return pci_generic_config_read(bus, devfn, where, size, val); } -static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { + struct dw_pcie_rp *pp = bus->sysdata; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + /* * This is an endpoint mode specific register happen to appear even * when controller is operating in root port mode and system hangs * when it is accessed with link being in ASPM-L1 state. * So skip accessing it altogether */ - if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) + if (!pcie->of_data->has_msix_doorbell_access_fix && + !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) return PCIBIOS_SUCCESSFUL; return pci_generic_config_write(bus, devfn, where, size, val); @@ -571,30 +585,12 @@ static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, static struct pci_ops tegra_pci_ops = { .map_bus = dw_pcie_own_conf_map_bus, - .read = tegra194_pcie_rd_own_conf, - .write = tegra194_pcie_wr_own_conf, + .read = tegra_pcie_dw_rd_own_conf, + .write = tegra_pcie_dw_wr_own_conf, }; #if defined(CONFIG_PCIEASPM) -static const u32 event_cntr_ctrl_offset[] = { - 0x1d8, - 0x1a8, - 0x1a8, - 0x1a8, - 0x1c4, - 0x1d8 -}; - -static const u32 event_cntr_data_offset[] = { - 0x1dc, - 0x1ac, - 0x1ac, - 0x1ac, - 0x1c8, - 0x1dc -}; - -static void disable_aspm_l11(struct tegra194_pcie *pcie) +static void disable_aspm_l11(struct tegra_pcie_dw *pcie) { u32 val; @@ -603,7 +599,7 @@ static void disable_aspm_l11(struct tegra194_pcie *pcie) dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } -static void disable_aspm_l12(struct tegra194_pcie *pcie) +static void disable_aspm_l12(struct tegra_pcie_dw *pcie) { u32 val; @@ -612,24 +608,27 @@ static void disable_aspm_l12(struct tegra194_pcie *pcie) dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val); } -static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event) +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event) { u32 val; - val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]); + val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_CONTROL); val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT); val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT; val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; - dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); - val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]); + dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); + val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_DATA); return val; } static int aspm_state_cnt(struct seq_file *s, void *data) { - struct tegra194_pcie *pcie = (struct tegra194_pcie *) + struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *) dev_get_drvdata(s->private); u32 val; @@ -649,18 +648,20 @@ static int aspm_state_cnt(struct 
seq_file *s, void *data) event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2)); /* Clear all counters */ - dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], + dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, EVENT_COUNTER_ALL_CLEAR); /* Re-enable counting */ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; - dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); + dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); return 0; } -static void init_host_aspm(struct tegra194_pcie *pcie) +static void init_host_aspm(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val; @@ -668,10 +669,14 @@ static void init_host_aspm(struct tegra194_pcie *pcie) val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS); pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; + pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci, + PCI_EXT_CAP_ID_VNDR); + /* Enable ASPM counters */ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; - dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val); + dw_pcie_writel_dbi(pci, pcie->ras_des_cap + + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); /* Program T_cmrt and T_pwr_on values */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); @@ -688,22 +693,22 @@ static void init_host_aspm(struct tegra194_pcie *pcie) dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); } -static void init_debugfs(struct tegra194_pcie *pcie) +static void init_debugfs(struct tegra_pcie_dw *pcie) { debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, aspm_state_cnt); } #else -static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; } -static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; } -static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; } -static inline void init_debugfs(struct tegra194_pcie *pcie) { return; } +static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } +static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } +static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } +static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; } #endif -static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; u16 val_w; @@ -711,13 +716,15 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; appl_writel(pcie, val, APPL_INTR_EN_L0_0); - val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); - val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; - appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + if (!pcie->of_data->has_sbr_reset_fix) { + val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); + val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + } if (pcie->enable_cdm_check) { val = appl_readl(pcie, APPL_INTR_EN_L0_0); - val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN; + val |= pcie->of_data->cdm_chk_int_en_bit; appl_writel(pcie, val, APPL_INTR_EN_L0_0); val = appl_readl(pcie, APPL_INTR_EN_L1_18); @@ -738,10 +745,10 @@ static void 
tegra_pcie_enable_system_interrupts(struct pcie_port *pp) val_w); } -static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; /* Enable legacy interrupt generation */ @@ -759,10 +766,10 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); } -static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; /* Enable MSI interrupt generation */ @@ -772,10 +779,10 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) appl_writel(pcie, val, APPL_INTR_EN_L0_0); } -static void tegra_pcie_enable_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); /* Clear interrupt statuses before enabling interrupts */ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); @@ -800,7 +807,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp) tegra_pcie_enable_msi_interrupts(pp); } -static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie) +static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; u32 val, offset, i; @@ -844,7 +851,8 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie) val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF); val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; - val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); + val |= (pcie->of_data->gen4_preset_vec << + GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK; dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); @@ -853,11 +861,12 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie) dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); } -static int tegra194_pcie_host_init(struct pcie_port *pp) +static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val; + u16 val_16; pp->bridge->ops = &tegra_pci_ops; @@ -865,6 +874,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp) pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); + val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); + val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); + val = dw_pcie_readl_dbi(pci, PCI_IO_BASE); val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8); dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); @@ -889,6 +903,15 @@ static int tegra194_pcie_host_init(struct pcie_port *pp) val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT); dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); + /* Clear Slot Clock Configuration bit if SRNS configuration */ + if (pcie->enable_srns) { + val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + val_16 &= ~PCI_EXP_LNKSTA_SLC; + 
dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, + val_16); + } + config_gen3_gen4_eq_presets(pcie); init_host_aspm(pcie); @@ -899,9 +922,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp) disable_aspm_l12(pcie); } - val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); - val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; - dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + if (!pcie->of_data->has_l1ss_exit_fix) { + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + } if (pcie->update_fc_fixup) { val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); @@ -914,14 +939,14 @@ static int tegra194_pcie_host_init(struct pcie_port *pp) return 0; } -static int tegra194_pcie_start_link(struct dw_pcie *pci) +static int tegra_pcie_dw_start_link(struct dw_pcie *pci) { u32 val, offset, speed, tmp; - struct tegra194_pcie *pcie = to_tegra_pcie(pci); - struct pcie_port *pp = &pci->pp; + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + struct dw_pcie_rp *pp = &pci->pp; bool retry = true; - if (pcie->mode == DW_PCIE_EP_TYPE) { + if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { enable_irq(pcie->pex_rst_irq); return 0; } @@ -980,9 +1005,9 @@ retry_link: offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF); val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP); val &= ~PCI_DLF_EXCHANGE_ENABLE; - dw_pcie_writel_dbi(pci, offset, val); + dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); - tegra194_pcie_host_init(pp); + tegra_pcie_dw_host_init(pp); dw_pcie_setup_rc(pp); retry = false; @@ -998,32 +1023,32 @@ retry_link: return 0; } -static int tegra194_pcie_link_up(struct dw_pcie *pci) +static int tegra_pcie_dw_link_up(struct dw_pcie *pci) { - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); return !!(val & PCI_EXP_LNKSTA_DLLLA); } -static void tegra194_pcie_stop_link(struct dw_pcie *pci) +static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) { - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); disable_irq(pcie->pex_rst_irq); } static const struct dw_pcie_ops tegra_dw_pcie_ops = { - .link_up = tegra194_pcie_link_up, - .start_link = tegra194_pcie_start_link, - .stop_link = tegra194_pcie_stop_link, + .link_up = tegra_pcie_dw_link_up, + .start_link = tegra_pcie_dw_start_link, + .stop_link = tegra_pcie_dw_stop_link, }; -static const struct dw_pcie_host_ops tegra194_pcie_host_ops = { - .host_init = tegra194_pcie_host_init, +static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { + .host_init = tegra_pcie_dw_host_init, }; -static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie) +static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie) { unsigned int phy_count = pcie->phy_count; @@ -1033,7 +1058,7 @@ static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie) } } -static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie) +static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie) { unsigned int i; int ret; @@ -1060,7 +1085,7 @@ phy_exit: return ret; } -static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) +static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); struct device_node *np = pcie->dev->of_node; @@ -1113,13 +1138,27 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) if (of_property_read_bool(np, 
"nvidia,update-fc-fixup")) pcie->update_fc_fixup = true; + /* RP using an external REFCLK is supported only in Tegra234 */ + if (pcie->of_data->version == TEGRA194_DWC_IP_VER) { + if (pcie->of_data->mode == DW_PCIE_EP_TYPE) + pcie->enable_ext_refclk = true; + } else { + pcie->enable_ext_refclk = + of_property_read_bool(pcie->dev->of_node, + "nvidia,enable-ext-refclk"); + } + pcie->supports_clkreq = of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); pcie->enable_cdm_check = of_property_read_bool(np, "snps,enable-cdm-check"); - if (pcie->mode == DW_PCIE_RC_TYPE) + if (pcie->of_data->version == TEGRA234_DWC_IP_VER) + pcie->enable_srns = + of_property_read_bool(np, "nvidia,enable-srns"); + + if (pcie->of_data->mode == DW_PCIE_RC_TYPE) return 0; /* Endpoint mode specific DT entries */ @@ -1156,15 +1195,18 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) return 0; } -static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie, +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, bool enable) { struct mrq_uphy_response resp; struct tegra_bpmp_message msg; struct mrq_uphy_request req; - /* Controller-5 doesn't need to have its state set by BPMP-FW */ - if (pcie->cid == 5) + /* + * Controller-5 doesn't need to have its state set by BPMP-FW in + * Tegra194 + */ + if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5) return 0; memset(&req, 0, sizeof(req)); @@ -1184,7 +1226,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie, return tegra_bpmp_transfer(pcie->bpmp, &msg); } -static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie, +static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, bool enable) { struct mrq_uphy_response resp; @@ -1212,9 +1254,9 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie, return tegra_bpmp_transfer(pcie->bpmp, &msg); } -static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie) +static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) { - struct pcie_port *pp = &pcie->pci.pp; + struct dw_pcie_rp *pp = &pcie->pci.pp; struct pci_bus *child, *root_bus = NULL; struct pci_dev *pdev; @@ -1250,7 +1292,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie) } } -static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie) +static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) { pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); if (IS_ERR(pcie->slot_ctl_3v3)) { @@ -1271,7 +1313,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie) return 0; } -static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie) +static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) { int ret; @@ -1309,7 +1351,7 @@ fail_12v_enable: return ret; } -static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie) +static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) { if (pcie->slot_ctl_12v) regulator_disable(pcie->slot_ctl_12v); @@ -1317,7 +1359,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie) regulator_disable(pcie->slot_ctl_3v3); } -static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, +static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, bool en_hw_hot_rst) { int ret; @@ -1330,6 +1372,14 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, return ret; } + if (pcie->enable_ext_refclk) { + ret = 
tegra_pcie_bpmp_set_pll_state(pcie, true); + if (ret) { + dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret); + goto fail_pll_init; + } + } + ret = tegra_pcie_enable_slot_regulators(pcie); if (ret < 0) goto fail_slot_reg_en; @@ -1353,11 +1403,13 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, goto fail_core_apb_rst; } - if (en_hw_hot_rst) { + if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) { /* Enable HW_HOT_RST mode */ val = appl_readl(pcie, APPL_CTRL); val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); val |= APPL_CTRL_HW_HOT_RST_EN; appl_writel(pcie, val, APPL_CTRL); } @@ -1384,6 +1436,19 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); appl_writel(pcie, val, APPL_CFG_MISC); + if (pcie->enable_srns || pcie->enable_ext_refclk) { + /* + * When Tegra PCIe RP is using external clock, it cannot supply + * same clock to its downstream hierarchy. Hence, gate PCIe RP + * REFCLK out pads when RP & EP are using separate clocks or RP + * is using an external REFCLK. + */ + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; + val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; + appl_writel(pcie, val, APPL_PINMUX); + } + if (!pcie->supports_clkreq) { val = appl_readl(pcie, APPL_PINMUX); val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; @@ -1409,12 +1474,15 @@ fail_core_clk: fail_reg_en: tegra_pcie_disable_slot_regulators(pcie); fail_slot_reg_en: + if (pcie->enable_ext_refclk) + tegra_pcie_bpmp_set_pll_state(pcie, false); +fail_pll_init: tegra_pcie_bpmp_set_ctrl_state(pcie, false); return ret; } -static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie) +static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) { int ret; @@ -1436,23 +1504,29 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie) tegra_pcie_disable_slot_regulators(pcie); + if (pcie->enable_ext_refclk) { + ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + if (ret) + dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret); + } + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); if (ret) dev_err(pcie->dev, "Failed to disable controller %d: %d\n", pcie->cid, ret); } -static int tegra_pcie_init_controller(struct tegra194_pcie *pcie) +static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; int ret; ret = tegra_pcie_config_controller(pcie, false); if (ret < 0) return ret; - pp->ops = &tegra194_pcie_host_ops; + pp->ops = &tegra_pcie_dw_host_ops; ret = dw_pcie_host_init(pp); if (ret < 0) { @@ -1467,11 +1541,11 @@ fail_host_init: return ret; } -static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie) +static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) { u32 val; - if (!tegra194_pcie_link_up(&pcie->pci)) + if (!tegra_pcie_dw_link_up(&pcie->pci)) return 0; val = appl_readl(pcie, APPL_RADM_STATUS); @@ -1483,12 +1557,12 @@ static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie) 1, PME_ACK_TIMEOUT); } -static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie) +static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) { u32 data; int err; - if (!tegra194_pcie_link_up(&pcie->pci)) { + if (!tegra_pcie_dw_link_up(&pcie->pci)) { dev_dbg(pcie->dev, "PCIe link is not up...!\n"); return; } @@ -1545,15 
+1619,15 @@ static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie) appl_writel(pcie, data, APPL_PINMUX); } -static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie) +static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) { tegra_pcie_downstream_dev_to_D0(pcie); dw_pcie_host_deinit(&pcie->pci.pp); - tegra194_pcie_pme_turnoff(pcie); + tegra_pcie_dw_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); } -static int tegra_pcie_config_rp(struct tegra194_pcie *pcie) +static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) { struct device *dev = pcie->dev; char *name; @@ -1580,7 +1654,7 @@ static int tegra_pcie_config_rp(struct tegra194_pcie *pcie) goto fail_pm_get_sync; } - pcie->link_state = tegra194_pcie_link_up(&pcie->pci); + pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); if (!pcie->link_state) { ret = -ENOMEDIUM; goto fail_host_init; @@ -1605,7 +1679,7 @@ fail_pm_get_sync: return ret; } -static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) +static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) { u32 val; int ret; @@ -1636,6 +1710,13 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) pm_runtime_put_sync(pcie->dev); + if (pcie->enable_ext_refclk) { + ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + if (ret) + dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", + ret); + } + ret = tegra_pcie_bpmp_set_pll_state(pcie, false); if (ret) dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); @@ -1644,13 +1725,14 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); } -static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) +static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) { struct dw_pcie *pci = &pcie->pci; struct dw_pcie_ep *ep = &pci->ep; struct device *dev = pcie->dev; u32 val; int ret; + u16 val_16; if (pcie->ep_state == EP_STATE_ENABLED) return; @@ -1662,10 +1744,20 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) return; } - ret = tegra_pcie_bpmp_set_pll_state(pcie, true); + ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); if (ret) { - dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); - goto fail_pll_init; + dev_err(pcie->dev, "Failed to enable controller %u: %d\n", + pcie->cid, ret); + goto fail_set_ctrl_state; + } + + if (pcie->enable_ext_refclk) { + ret = tegra_pcie_bpmp_set_pll_state(pcie, true); + if (ret) { + dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", + ret); + goto fail_pll_init; + } } ret = clk_prepare_enable(pcie->core_clk); @@ -1762,12 +1854,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) disable_aspm_l12(pcie); } - val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); - val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; - dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + if (!pcie->of_data->has_l1ss_exit_fix) { + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + } pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP); + + val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); + val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); + + /* Clear Slot Clock Configuration bit if SRNS configuration */ + if (pcie->enable_srns) { + val_16 = dw_pcie_readw_dbi(pci, 
pcie->pcie_cap_base + + PCI_EXP_LNKSTA); + val_16 &= ~PCI_EXP_LNKSTA_SLC; + dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, + val_16); + } + clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); @@ -1784,6 +1893,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) dw_pcie_ep_init_notify(ep); + /* Program the private control to allow sending LTR upstream */ + if (pcie->of_data->has_ltr_req_fix) { + val = appl_readl(pcie, APPL_LTR_MSG_2); + val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; + appl_writel(pcie, val, APPL_LTR_MSG_2); + } + /* Enable LTSSM */ val = appl_readl(pcie, APPL_CTRL); val |= APPL_CTRL_LTSSM_EN; @@ -1804,12 +1920,14 @@ fail_core_apb_rst: fail_core_clk_enable: tegra_pcie_bpmp_set_pll_state(pcie, false); fail_pll_init: + tegra_pcie_bpmp_set_ctrl_state(pcie, false); +fail_set_ctrl_state: pm_runtime_put_sync(dev); } static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) { - struct tegra194_pcie *pcie = arg; + struct tegra_pcie_dw *pcie = arg; if (gpiod_get_value(pcie->pex_rst_gpiod)) pex_ep_event_pex_rst_assert(pcie); @@ -1819,7 +1937,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) return IRQ_HANDLED; } -static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) { /* Tegra194 supports only INTA */ if (irq > 1) @@ -1831,7 +1949,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq) return 0; } -static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) { if (unlikely(irq > 31)) return -EINVAL; @@ -1841,7 +1959,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq) return 0; } -static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) { struct dw_pcie_ep *ep = &pcie->pci.ep; @@ -1855,7 +1973,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct tegra194_pcie *pcie = to_tegra_pcie(pci); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); switch (type) { case PCI_EPC_IRQ_LEGACY: @@ -1896,7 +2014,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = { .get_features = tegra_pcie_ep_get_features, }; -static int tegra_pcie_config_ep(struct tegra194_pcie *pcie, +static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; @@ -1951,19 +2069,20 @@ static int tegra_pcie_config_ep(struct tegra194_pcie *pcie, if (ret) { dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", ret); + pm_runtime_disable(dev); return ret; } return 0; } -static int tegra194_pcie_probe(struct platform_device *pdev) +static int tegra_pcie_dw_probe(struct platform_device *pdev) { - const struct tegra194_pcie_of_data *data; + const struct tegra_pcie_dw_of_data *data; struct device *dev = &pdev->dev; struct resource *atu_dma_res; - struct tegra194_pcie *pcie; - struct pcie_port *pp; + struct tegra_pcie_dw *pcie; + struct dw_pcie_rp *pp; struct dw_pcie *pci; struct phy **phys; char *name; @@ -1979,16 +2098,14 @@ static int tegra194_pcie_probe(struct platform_device *pdev) pci = &pcie->pci; pci->dev = &pdev->dev; pci->ops = &tegra_dw_pcie_ops; - pci->n_fts[0] = N_FTS_VAL; - pci->n_fts[1] = FTS_VAL; 
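/*
 * Aside: a minimal sketch (not the driver's literal code) of the per-SoC
 * match-data pattern this probe hunk moves to. The hard-coded values being
 * removed here are replaced by fields of tegra_pcie_dw_of_data selected
 * through the OF match table; of_device_get_match_data() and the exact
 * field names are assumptions based on the surrounding hunk.
 */
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int tegra_pcie_dw_probe_sketch(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct tegra_pcie_dw *pcie;

	/* Per-SoC data comes from the .data member of the OF match entry */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
	/* n_fts[] now differs per SoC: Tegra194 {52, 52} vs Tegra234 {52, 80} */
	pcie->pci.n_fts[0] = data->n_fts[0];
	pcie->pci.n_fts[1] = data->n_fts[1];

	return 0;
}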
- pci->version = 0x490A; - + pcie->dev = &pdev->dev; + pcie->of_data = (struct tegra_pcie_dw_of_data *)data; + pci->n_fts[0] = pcie->of_data->n_fts[0]; + pci->n_fts[1] = pcie->of_data->n_fts[1]; pp = &pci->pp; pp->num_vectors = MAX_MSI_IRQS; - pcie->dev = &pdev->dev; - pcie->mode = (enum dw_pcie_device_mode)data->mode; - ret = tegra194_pcie_parse_dt(pcie); + ret = tegra_pcie_dw_parse_dt(pcie); if (ret < 0) { const char *level = KERN_ERR; @@ -2103,7 +2220,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); - switch (pcie->mode) { + switch (pcie->of_data->mode) { case DW_PCIE_RC_TYPE: ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, IRQF_SHARED, "tegra-pcie-intr", pcie); @@ -2138,7 +2255,8 @@ static int tegra194_pcie_probe(struct platform_device *pdev) break; default: - dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode); + dev_err(dev, "Invalid PCIe device type %d\n", + pcie->of_data->mode); } fail: @@ -2146,16 +2264,22 @@ fail: return ret; } -static int tegra194_pcie_remove(struct platform_device *pdev) +static int tegra_pcie_dw_remove(struct platform_device *pdev) { - struct tegra194_pcie *pcie = platform_get_drvdata(pdev); + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); - if (!pcie->link_state) - return 0; + if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { + if (!pcie->link_state) + return 0; + + debugfs_remove_recursive(pcie->debugfs); + tegra_pcie_deinit_controller(pcie); + pm_runtime_put_sync(pcie->dev); + } else { + disable_irq(pcie->pex_rst_irq); + pex_ep_event_pex_rst_assert(pcie); + } - debugfs_remove_recursive(pcie->debugfs); - tegra_pcie_deinit_controller(pcie); - pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); tegra_bpmp_put(pcie->bpmp); if (pcie->pex_refclk_sel_gpiod) @@ -2164,44 +2288,48 @@ static int tegra194_pcie_remove(struct platform_device *pdev) return 0; } -static int tegra194_pcie_suspend_late(struct device *dev) +static int tegra_pcie_dw_suspend_late(struct device *dev) { - struct tegra194_pcie *pcie = dev_get_drvdata(dev); + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); u32 val; + if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { + dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n"); + return -EPERM; + } + if (!pcie->link_state) return 0; /* Enable HW_HOT_RST mode */ - val = appl_readl(pcie, APPL_CTRL); - val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << - APPL_CTRL_HW_HOT_RST_MODE_SHIFT); - val |= APPL_CTRL_HW_HOT_RST_EN; - appl_writel(pcie, val, APPL_CTRL); + if (!pcie->of_data->has_sbr_reset_fix) { + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + } return 0; } -static int tegra194_pcie_suspend_noirq(struct device *dev) +static int tegra_pcie_dw_suspend_noirq(struct device *dev) { - struct tegra194_pcie *pcie = dev_get_drvdata(dev); + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); if (!pcie->link_state) return 0; - /* Save MSI interrupt vector */ - pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci, - PORT_LOGIC_MSI_CTRL_INT_0_EN); tegra_pcie_downstream_dev_to_D0(pcie); - tegra194_pcie_pme_turnoff(pcie); + tegra_pcie_dw_pme_turnoff(pcie); tegra_pcie_unconfig_controller(pcie); return 0; } -static int tegra194_pcie_resume_noirq(struct device *dev) +static int tegra_pcie_dw_resume_noirq(struct device *dev) { - struct tegra194_pcie *pcie = dev_get_drvdata(dev); + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); int ret; if 
(!pcie->link_state) @@ -2211,7 +2339,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev) if (ret < 0) return ret; - ret = tegra194_pcie_host_init(&pcie->pci.pp); + ret = tegra_pcie_dw_host_init(&pcie->pci.pp); if (ret < 0) { dev_err(dev, "Failed to init host: %d\n", ret); goto fail_host_init; @@ -2219,14 +2347,10 @@ static int tegra194_pcie_resume_noirq(struct device *dev) dw_pcie_setup_rc(&pcie->pci.pp); - ret = tegra194_pcie_start_link(&pcie->pci); + ret = tegra_pcie_dw_start_link(&pcie->pci); if (ret < 0) goto fail_host_init; - /* Restore MSI interrupt vector */ - dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN, - pcie->msi_ctrl_int); - return 0; fail_host_init: @@ -2234,12 +2358,12 @@ fail_host_init: return ret; } -static int tegra194_pcie_resume_early(struct device *dev) +static int tegra_pcie_dw_resume_early(struct device *dev) { - struct tegra194_pcie *pcie = dev_get_drvdata(dev); + struct tegra_pcie_dw *pcie = dev_get_drvdata(dev); u32 val; - if (pcie->mode == DW_PCIE_EP_TYPE) { + if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { dev_err(dev, "Suspend is not supported in EP mode"); return -ENOTSUPP; } @@ -2248,75 +2372,124 @@ static int tegra194_pcie_resume_early(struct device *dev) return 0; /* Disable HW_HOT_RST mode */ - val = appl_readl(pcie, APPL_CTRL); - val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << - APPL_CTRL_HW_HOT_RST_MODE_SHIFT); - val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << - APPL_CTRL_HW_HOT_RST_MODE_SHIFT; - val &= ~APPL_CTRL_HW_HOT_RST_EN; - appl_writel(pcie, val, APPL_CTRL); + if (!pcie->of_data->has_sbr_reset_fix) { + val = appl_readl(pcie, APPL_CTRL); + val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT); + val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << + APPL_CTRL_HW_HOT_RST_MODE_SHIFT; + val &= ~APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + } return 0; } -static void tegra194_pcie_shutdown(struct platform_device *pdev) +static void tegra_pcie_dw_shutdown(struct platform_device *pdev) { - struct tegra194_pcie *pcie = platform_get_drvdata(pdev); + struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); - if (!pcie->link_state) - return; + if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { + if (!pcie->link_state) + return; - debugfs_remove_recursive(pcie->debugfs); - tegra_pcie_downstream_dev_to_D0(pcie); + debugfs_remove_recursive(pcie->debugfs); + tegra_pcie_downstream_dev_to_D0(pcie); - disable_irq(pcie->pci.pp.irq); - if (IS_ENABLED(CONFIG_PCI_MSI)) - disable_irq(pcie->pci.pp.msi_irq); + disable_irq(pcie->pci.pp.irq); + if (IS_ENABLED(CONFIG_PCI_MSI)) + disable_irq(pcie->pci.pp.msi_irq[0]); - tegra194_pcie_pme_turnoff(pcie); - tegra_pcie_unconfig_controller(pcie); + tegra_pcie_dw_pme_turnoff(pcie); + tegra_pcie_unconfig_controller(pcie); + pm_runtime_put_sync(pcie->dev); + } else { + disable_irq(pcie->pex_rst_irq); + pex_ep_event_pex_rst_assert(pcie); + } } -static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = { +static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = { + .version = TEGRA194_DWC_IP_VER, + .mode = DW_PCIE_RC_TYPE, + .cdm_chk_int_en_bit = BIT(19), + /* Gen4 - 5, 6, 8 and 9 presets enabled */ + .gen4_preset_vec = 0x360, + .n_fts = { 52, 52 }, +}; + +static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = { + .version = TEGRA194_DWC_IP_VER, + .mode = DW_PCIE_EP_TYPE, + .cdm_chk_int_en_bit = BIT(19), + /* Gen4 - 5, 6, 8 and 9 presets enabled */ + .gen4_preset_vec = 0x360, + .n_fts = { 52, 52 }, +}; + +static const struct tegra_pcie_dw_of_data 
tegra234_pcie_dw_rc_of_data = { + .version = TEGRA234_DWC_IP_VER, .mode = DW_PCIE_RC_TYPE, + .has_msix_doorbell_access_fix = true, + .has_sbr_reset_fix = true, + .has_l1ss_exit_fix = true, + .cdm_chk_int_en_bit = BIT(18), + /* Gen4 - 6, 8 and 9 presets enabled */ + .gen4_preset_vec = 0x340, + .n_fts = { 52, 80 }, }; -static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = { +static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = { + .version = TEGRA234_DWC_IP_VER, .mode = DW_PCIE_EP_TYPE, + .has_l1ss_exit_fix = true, + .has_ltr_req_fix = true, + .cdm_chk_int_en_bit = BIT(18), + /* Gen4 - 6, 8 and 9 presets enabled */ + .gen4_preset_vec = 0x340, + .n_fts = { 52, 80 }, }; -static const struct of_device_id tegra194_pcie_of_match[] = { +static const struct of_device_id tegra_pcie_dw_of_match[] = { { .compatible = "nvidia,tegra194-pcie", - .data = &tegra194_pcie_rc_of_data, + .data = &tegra194_pcie_dw_rc_of_data, }, { .compatible = "nvidia,tegra194-pcie-ep", - .data = &tegra194_pcie_ep_of_data, + .data = &tegra194_pcie_dw_ep_of_data, + }, + { + .compatible = "nvidia,tegra234-pcie", + .data = &tegra234_pcie_dw_rc_of_data, + }, + { + .compatible = "nvidia,tegra234-pcie-ep", + .data = &tegra234_pcie_dw_ep_of_data, }, - {}, + {} }; -static const struct dev_pm_ops tegra194_pcie_pm_ops = { - .suspend_late = tegra194_pcie_suspend_late, - .suspend_noirq = tegra194_pcie_suspend_noirq, - .resume_noirq = tegra194_pcie_resume_noirq, - .resume_early = tegra194_pcie_resume_early, +static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { + .suspend_late = tegra_pcie_dw_suspend_late, + .suspend_noirq = tegra_pcie_dw_suspend_noirq, + .resume_noirq = tegra_pcie_dw_resume_noirq, + .resume_early = tegra_pcie_dw_resume_early, }; -static struct platform_driver tegra194_pcie_driver = { - .probe = tegra194_pcie_probe, - .remove = tegra194_pcie_remove, - .shutdown = tegra194_pcie_shutdown, +static struct platform_driver tegra_pcie_dw_driver = { + .probe = tegra_pcie_dw_probe, + .remove = tegra_pcie_dw_remove, + .shutdown = tegra_pcie_dw_shutdown, .driver = { .name = "tegra194-pcie", - .pm = &tegra194_pcie_pm_ops, - .of_match_table = tegra194_pcie_of_match, + .pm = &tegra_pcie_dw_pm_ops, + .of_match_table = tegra_pcie_dw_of_match, }, }; -module_platform_driver(tegra194_pcie_driver); +module_platform_driver(tegra_pcie_dw_driver); -MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match); +MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match); MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>"); MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c index 69810c6b0d58..4d0a587c0ba5 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/init.h> +#include <linux/iopoll.h> #include <linux/of_device.h> #include <linux/pci.h> #include <linux/phy/phy.h> @@ -31,6 +32,17 @@ #define PCL_RSTCTRL2 0x0024 #define PCL_RSTCTRL_PHY_RESET BIT(0) +#define PCL_PINCTRL0 0x002c +#define PCL_PERST_PLDN_REGEN BIT(12) +#define PCL_PERST_NOE_REGEN BIT(11) +#define PCL_PERST_OUT_REGEN BIT(8) +#define PCL_PERST_PLDN_REGVAL BIT(4) +#define PCL_PERST_NOE_REGVAL BIT(3) +#define PCL_PERST_OUT_REGVAL BIT(0) + +#define PCL_PIPEMON 0x0044 +#define PCL_PCLK_ALIVE BIT(15) + #define PCL_MODE 0x8000 #define PCL_MODE_REGEN BIT(8) #define PCL_MODE_REGVAL BIT(0) @@ -51,6 +63,9 @@ #define PCL_APP_INTX 0x8074 
#define PCL_APP_INTX_SYS_INT BIT(0) +#define PCL_APP_PM0 0x8078 +#define PCL_SYS_AUX_PWR_DET BIT(8) + /* assertion time of INTx in usec */ #define PCL_INTX_WIDTH_USEC 30 @@ -60,7 +75,14 @@ struct uniphier_pcie_ep_priv { struct clk *clk, *clk_gio; struct reset_control *rst, *rst_gio; struct phy *phy; - const struct pci_epc_features *features; + const struct uniphier_pcie_ep_soc_data *data; +}; + +struct uniphier_pcie_ep_soc_data { + bool has_gio; + void (*init)(struct uniphier_pcie_ep_priv *priv); + int (*wait)(struct uniphier_pcie_ep_priv *priv); + const struct pci_epc_features features; }; #define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) @@ -91,7 +113,7 @@ static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv, writel(val, priv->base + PCL_RSTCTRL2); } -static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv) +static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv) { u32 val; @@ -116,6 +138,55 @@ static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv) msleep(100); } +static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv) +{ + u32 val; + + /* set EP mode */ + val = readl(priv->base + PCL_MODE); + val |= PCL_MODE_REGEN | PCL_MODE_REGVAL; + writel(val, priv->base + PCL_MODE); + + /* use auxiliary power detection */ + val = readl(priv->base + PCL_APP_PM0); + val |= PCL_SYS_AUX_PWR_DET; + writel(val, priv->base + PCL_APP_PM0); + + /* assert PERST# */ + val = readl(priv->base + PCL_PINCTRL0); + val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL + | PCL_PERST_PLDN_REGVAL); + val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN + | PCL_PERST_PLDN_REGEN; + writel(val, priv->base + PCL_PINCTRL0); + + uniphier_pcie_ltssm_enable(priv, false); + + usleep_range(100000, 200000); + + /* deassert PERST# */ + val = readl(priv->base + PCL_PINCTRL0); + val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN; + writel(val, priv->base + PCL_PINCTRL0); +} + +static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv) +{ + u32 status; + int ret; + + /* wait PIPE clock */ + ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status, + status & PCL_PCLK_ALIVE, 100000, 1000000); + if (ret) { + dev_err(priv->pci.dev, + "Failed to initialize controller in EP mode\n"); + return ret; + } + + return 0; +} + static int uniphier_pcie_start_link(struct dw_pcie *pci) { struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); @@ -209,7 +280,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep) struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); - return priv->features; + return &priv->data->features; } static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = { @@ -238,7 +309,8 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv) if (ret) goto out_rst_assert; - uniphier_pcie_init_ep(priv); + if (priv->data->init) + priv->data->init(priv); uniphier_pcie_phy_reset(priv, true); @@ -248,8 +320,16 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv) uniphier_pcie_phy_reset(priv, false); + if (priv->data->wait) { + ret = priv->data->wait(priv); + if (ret) + goto out_phy_exit; + } + return 0; +out_phy_exit: + phy_exit(priv->phy); out_rst_gio_assert: reset_control_assert(priv->rst_gio); out_rst_assert: @@ -277,8 +357,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->features = of_device_get_match_data(dev); - if (WARN_ON(!priv->features)) + priv->data = of_device_get_match_data(dev); + if 
(WARN_ON(!priv->data)) return -EINVAL; priv->pci.dev = dev; @@ -288,13 +368,15 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk_gio = devm_clk_get(dev, "gio"); - if (IS_ERR(priv->clk_gio)) - return PTR_ERR(priv->clk_gio); + if (priv->data->has_gio) { + priv->clk_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_gio)) + return PTR_ERR(priv->clk_gio); - priv->rst_gio = devm_reset_control_get_shared(dev, "gio"); - if (IS_ERR(priv->rst_gio)) - return PTR_ERR(priv->rst_gio); + priv->rst_gio = devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_gio)) + return PTR_ERR(priv->rst_gio); + } priv->clk = devm_clk_get(dev, "link"); if (IS_ERR(priv->clk)) @@ -321,13 +403,31 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev) return dw_pcie_ep_init(&priv->pci.ep); } -static const struct pci_epc_features uniphier_pro5_data = { - .linkup_notifier = false, - .msi_capable = true, - .msix_capable = false, - .align = 1 << 16, - .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), - .reserved_bar = BIT(BAR_4), +static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = { + .has_gio = true, + .init = uniphier_pcie_pro5_init_ep, + .wait = NULL, + .features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .align = 1 << 16, + .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), + .reserved_bar = BIT(BAR_4), + }, +}; + +static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = { + .has_gio = false, + .init = uniphier_pcie_nx1_init_ep, + .wait = uniphier_pcie_nx1_wait_ep, + .features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .align = 1 << 12, + .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), + }, }; static const struct of_device_id uniphier_pcie_ep_match[] = { @@ -335,6 +435,10 @@ static const struct of_device_id uniphier_pcie_ep_match[] = { .compatible = "socionext,uniphier-pro5-pcie-ep", .data = &uniphier_pro5_data, }, + { + .compatible = "socionext,uniphier-nx1-pcie-ep", + .data = &uniphier_nx1_data, + }, { /* sentinel */ }, }; diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index b45ac3754242..48c3eba817b4 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie) static void uniphier_pcie_irq_mask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; @@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d) static void uniphier_pcie_irq_unmask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); unsigned long flags; @@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = { static void uniphier_pcie_irq_handler(struct irq_desc *desc) { - struct pcie_port *pp = irq_desc_get_handler_data(desc); + struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); struct irq_chip *chip = irq_desc_get_chip(desc); @@ 
-258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) +static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); @@ -295,7 +295,7 @@ out_put_node: return ret; } -static int uniphier_pcie_host_init(struct pcie_port *pp) +static int uniphier_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct uniphier_pcie *pcie = to_uniphier_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 50f80f07e4db..71026fefa366 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci) */ static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr) { - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; return cpu_addr & ~pp->io_base; } @@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = { .stop_link = visconti_pcie_stop_link, }; -static int visconti_pcie_host_init(struct pcie_port *pp) +static int visconti_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); @@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie, struct platform_device *pdev) { struct dw_pcie *pci = &pcie->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie_rp *pp = &pci->pp; pp->irq = platform_get_irq_byname(pdev, "intr"); if (pp->irq < 0) diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index f3547aa60140..31a7bdebe540 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -295,7 +295,7 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit) /* fixup for PCIe class register */ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS); value &= 0xff; - value |= (PCI_CLASS_BRIDGE_PCI << 16); + value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS); return 0; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 4f5b44827d21..ba36bbc5897d 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -8,6 +8,7 @@ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com> */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> @@ -32,16 +33,14 @@ #define PCIE_CORE_DEV_ID_REG 0x0 #define PCIE_CORE_CMD_STATUS_REG 0x4 #define PCIE_CORE_DEV_REV_REG 0x8 +#define PCIE_CORE_SSDEV_ID_REG 0x2c #define PCIE_CORE_PCIEXP_CAP 0xc0 +#define PCIE_CORE_PCIERR_CAP 0x100 #define PCIE_CORE_ERR_CAPCTL_REG 0x118 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) -#define PCIE_CORE_INT_A_ASSERT_ENABLE 1 -#define PCIE_CORE_INT_B_ASSERT_ENABLE 2 -#define PCIE_CORE_INT_C_ASSERT_ENABLE 3 -#define PCIE_CORE_INT_D_ASSERT_ENABLE 4 /* PIO registers base address and register offsets */ #define PIO_BASE_ADDR 0x4000 #define PIO_CTRL (PIO_BASE_ADDR + 0x0) @@ -102,6 +101,10 @@ #define 
PCIE_MSG_PM_PME_MASK BIT(7) #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) #define PCIE_ISR0_MSI_INT_PENDING BIT(24) +#define PCIE_ISR0_CORR_ERR BIT(11) +#define PCIE_ISR0_NFAT_ERR BIT(12) +#define PCIE_ISR0_FAT_ERR BIT(13) +#define PCIE_ISR0_ERR_MASK GENMASK(13, 11) #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) #define PCIE_ISR0_ALL_MASK GENMASK(31, 0) @@ -272,17 +275,15 @@ struct advk_pcie { u32 actions; } wins[OB_WIN_COUNT]; u8 wins_count; + struct irq_domain *rp_irq_domain; struct irq_domain *irq_domain; struct irq_chip irq_chip; raw_spinlock_t irq_lock; struct irq_domain *msi_domain; struct irq_domain *msi_inner_domain; - struct irq_chip msi_bottom_irq_chip; - struct irq_chip msi_irq_chip; - struct msi_domain_info msi_domain_info; + raw_spinlock_t msi_irq_lock; DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); struct mutex msi_used_lock; - u16 msi_msg; int link_gen; struct pci_bridge_emul bridge; struct gpio_desc *reset_gpio; @@ -477,6 +478,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num) static void advk_pcie_setup_hw(struct advk_pcie *pcie) { + phys_addr_t msi_addr; u32 reg; int i; @@ -529,7 +531,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) */ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG); reg &= ~0xffffff00; - reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG); /* Disable Root Bridge I/O space, memory space and bus mastering */ @@ -565,6 +567,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) reg |= LANE_COUNT_1; advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + /* Set MSI address */ + msi_addr = virt_to_phys(pcie); + advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG); + advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG); + /* Enable MSI */ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); reg |= PCIE_CORE_CTRL2_MSI_ENABLE; @@ -576,15 +583,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); - /* Disable All ISR0/1 Sources */ - reg = PCIE_ISR0_ALL_MASK; + /* Disable All ISR0/1 and MSI Sources */ + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + + /* Unmask summary MSI interrupt */ + reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); reg &= ~PCIE_ISR0_MSI_INT_PENDING; advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); - advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); - - /* Unmask all MSIs */ - advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); + /* Unmask PME interrupt for processing of PME requester */ + reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); + reg &= ~PCIE_MSG_PM_PME_MASK; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); /* Enable summary interrupt for GIC SPI source */ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); @@ -778,11 +790,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, case PCI_INTERRUPT_LINE: { /* * From the whole 32bit register we support reading from HW only - * one bit: PCI_BRIDGE_CTL_BUS_RESET. + * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR. * Other bits are retrieved only from emulated config buffer. 
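Several hunks in this patch (aardvark's PCIE_CORE_DEV_REV_REG above, mobiveil's PAB_INTP_AXI_PIO_CLASS earlier, mvebu's PCIE_DEV_REV_OFF later) switch from open-coded shifts of PCI_CLASS_BRIDGE_PCI to the newer PCI_CLASS_BRIDGE_PCI_NORMAL constant. The register values are unchanged; a minimal standalone check of that equivalence, using the constants from include/linux/pci_ids.h:

```c
/* Check that the class-code rewrites in this patch are bit-identical to
 * the expressions they replace.  The register layout is the config-space
 * header: 24-bit class code above an 8-bit revision ID.
 */
#include <assert.h>
#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI        0x0604   /* base class + subclass */
#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400 /* + prog-if, 24 bits    */

int main(void)
{
	unsigned int reg = 0x000000ab;           /* pretend revision ID 0xab */

	reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; /* new spelling */
	assert(reg == 0x060400ab);

	/* old aardvark/mvebu spelling */
	assert(((PCI_CLASS_BRIDGE_PCI << 8) << 8) ==
	       (PCI_CLASS_BRIDGE_PCI_NORMAL << 8));
	/* old mobiveil spelling */
	assert((PCI_CLASS_BRIDGE_PCI << 16) ==
	       (PCI_CLASS_BRIDGE_PCI_NORMAL << 8));

	printf("class/revision register: %#010x\n", reg);
	return 0;
}
```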
*/ __le32 *cfgspace = (__le32 *)&bridge->conf; u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); + if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK) + val &= ~(PCI_BRIDGE_CTL_SERR << 16); + else + val |= PCI_BRIDGE_CTL_SERR << 16; if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN) val |= PCI_BRIDGE_CTL_BUS_RESET << 16; else @@ -808,6 +824,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, break; case PCI_INTERRUPT_LINE: + /* + * According to Figure 6-3: Pseudo Logic Diagram for Error + * Message Controls in PCIe base specification, SERR# Enable bit + * in Bridge Control register enable receiving of ERR_* messages + */ + if (mask & (PCI_BRIDGE_CTL_SERR << 16)) { + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); + if (new & (PCI_BRIDGE_CTL_SERR << 16)) + val &= ~PCIE_ISR0_ERR_MASK; + else + val |= PCIE_ISR0_ERR_MASK; + advk_writel(pcie, val, PCIE_ISR0_MASK_REG); + } if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) @@ -831,24 +860,12 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, switch (reg) { - case PCI_EXP_SLTCTL: - *value = PCI_EXP_SLTSTA_PDS << 16; - return PCI_BRIDGE_EMUL_HANDLED; - - case PCI_EXP_RTCTL: { - u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); - *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE; - *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE; - *value |= PCI_EXP_RTCAP_CRSVIS << 16; - return PCI_BRIDGE_EMUL_HANDLED; - } - - case PCI_EXP_RTSTA: { - u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); - u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); - *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); - return PCI_BRIDGE_EMUL_HANDLED; - } + /* + * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are + * also supported, but do not need to be handled here, because their + * values are stored in emulated config space buffer, and we read them + * from there when needed. + */ case PCI_EXP_LNKCAP: { u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); @@ -903,19 +920,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, break; case PCI_EXP_RTCTL: { - /* Only mask/unmask PME interrupt */ - u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) & - ~PCIE_MSG_PM_PME_MASK; - if ((new & PCI_EXP_RTCTL_PMEIE) == 0) - val |= PCIE_MSG_PM_PME_MASK; - advk_writel(pcie, val, PCIE_ISR0_MASK_REG); + u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl); + /* Only emulation of PMEIE and CRSSVE bits is provided */ + rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE; + bridge->pcie_conf.rootctl = cpu_to_le16(rootctl); break; } - case PCI_EXP_RTSTA: - new = (new & PCI_EXP_RTSTA_PME) >> 9; - advk_writel(pcie, new, PCIE_ISR0_REG); - break; + /* + * PCI_EXP_RTSTA is also supported, but does not need to be handled + * here, because its value is stored in emulated config space buffer, + * and we write it there when needed. 
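The new PCI_INTERRUPT_LINE write hook above wires the emulated Bridge Control SERR# Enable bit to the controller's ISR0 error mask, as the quoted spec comment describes. A minimal sketch of that coupling, with advk_readl()/advk_writel() replaced by a plain variable; register names and bit positions are copied from the hunk:

```c
#include <assert.h>
#include <stdint.h>

#define BIT(n)              (1u << (n))
#define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define PCIE_ISR0_ERR_MASK  GENMASK(13, 11)
#define PCI_BRIDGE_CTL_SERR 0x02 /* Bridge Control, per PCI spec */

static uint32_t isr0_mask = ~0u; /* all sources masked after reset */

/* Bridge Control lives in the high half of the PCI_INTERRUPT_LINE dword */
static void bridge_ctl_write(uint32_t new, uint32_t mask)
{
	if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
		if (new & (PCI_BRIDGE_CTL_SERR << 16))
			isr0_mask &= ~PCIE_ISR0_ERR_MASK; /* unmask ERR_* */
		else
			isr0_mask |= PCIE_ISR0_ERR_MASK;  /* mask again   */
	}
}

int main(void)
{
	bridge_ctl_write(PCI_BRIDGE_CTL_SERR << 16, PCI_BRIDGE_CTL_SERR << 16);
	assert(!(isr0_mask & PCIE_ISR0_ERR_MASK));

	bridge_ctl_write(0, PCI_BRIDGE_CTL_SERR << 16);
	assert((isr0_mask & PCIE_ISR0_ERR_MASK) == PCIE_ISR0_ERR_MASK);
	return 0;
}
```

The read side (earlier in the hunk) does the inverse: it reports SERR# as set exactly when the error sources are currently unmasked.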
+ */ case PCI_EXP_DEVCTL: case PCI_EXP_DEVCTL2: @@ -928,11 +944,89 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, } } -static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { +static pci_bridge_emul_read_status_t +advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct advk_pcie *pcie = bridge->data; + + switch (reg) { + case 0: + *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); + + /* + * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada + * 3700 Functional Specification does not document registers + * at those addresses. + * + * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error + * Reporting Capability header the last Extended Capability. + * If we obtain documentation for those registers in the + * future, this can be changed. + */ + *value &= 0x000fffff; + return PCI_BRIDGE_EMUL_HANDLED; + + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_STATUS: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG + 0: + case PCI_ERR_HEADER_LOG + 4: + case PCI_ERR_HEADER_LOG + 8: + case PCI_ERR_HEADER_LOG + 12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_STATUS: + case PCI_ERR_ROOT_ERR_SRC: + *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); + return PCI_BRIDGE_EMUL_HANDLED; + + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } +} + +static void +advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) +{ + struct advk_pcie *pcie = bridge->data; + + switch (reg) { + /* These are W1C registers, so clear other bits */ + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_COR_STATUS: + case PCI_ERR_ROOT_STATUS: + new &= mask; + fallthrough; + + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG + 0: + case PCI_ERR_HEADER_LOG + 4: + case PCI_ERR_HEADER_LOG + 8: + case PCI_ERR_HEADER_LOG + 12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_ERR_SRC: + advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg); + break; + + default: + break; + } +} + +static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { .read_base = advk_pci_bridge_emul_base_conf_read, .write_base = advk_pci_bridge_emul_base_conf_write, .read_pcie = advk_pci_bridge_emul_pcie_conf_read, .write_pcie = advk_pci_bridge_emul_pcie_conf_write, + .read_ext = advk_pci_bridge_emul_ext_conf_read, + .write_ext = advk_pci_bridge_emul_ext_conf_write, }; /* @@ -959,15 +1053,35 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64); /* Support interrupt A for MSI feature */ - bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; + bridge->conf.intpin = PCI_INTERRUPT_INTA; - /* Aardvark HW provides PCIe Capability structure in version 2 */ - bridge->pcie_conf.cap = cpu_to_le16(2); + /* + * Aardvark HW provides PCIe Capability structure in version 2 and + * indicate slot support, which is emulated. + */ + bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT); + + /* + * Set Presence Detect State bit permanently since there is no support + * for unplugging the card nor detecting whether it is plugged. (If a + * platform exists in the future that supports it, via a GPIO for + * example, it should be implemented via this bit.) 
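advk_pci_bridge_emul_ext_conf_write() above treats the AER status registers as write-1-to-clear and masks the written value to the byte enables first ("new &= mask"). A toy model of that W1C behavior, with hypothetical register values:

```c
#include <assert.h>
#include <stdint.h>

static uint32_t hw_status = 0x00000011; /* two error bits pending */

static void w1c_write(uint32_t new, uint32_t mask)
{
	new &= mask;       /* never clear bits outside the byte enables */
	hw_status &= ~new; /* what the device does with a W1C register  */
}

int main(void)
{
	/* An 8-bit write of 0xff to the low byte clears bit 0 but must
	 * leave bit 4 (outside the write's byte enables) pending. */
	w1c_write(0xff, 0x0f);
	assert(hw_status == 0x00000010);
	return 0;
}
```

The non-status AER registers in the same switch are plain read-write and are passed through unmodified.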
+ * + * Set physical slot number to 1 since there is only one port and zero + * value is reserved for ports within the same silicon as Root Port + * which is not our case. + */ + bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN, + 1)); + bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS); /* Indicates supports for Completion Retry Status */ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); + bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff; + bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16; bridge->has_pcie = true; + bridge->pcie_start = PCIE_CORE_PCIEXP_CAP; bridge->data = pcie; bridge->ops = &advk_pci_bridge_emul_ops; @@ -981,8 +1095,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, return false; /* - * If the link goes down after we check for link-up, nothing bad - * happens but the config access times out. + * If the link goes down after we check for link-up, we have a problem: + * if a PIO request is executed while link-down, the whole controller + * gets stuck in a non-functional state, and even after link comes up + * again, PIO requests won't work anymore, and a reset of the whole PCIe + * controller is needed. Therefore we need to prevent sending PIO + * requests while the link is down. */ if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) return false; @@ -1180,11 +1298,11 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); - phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); + phys_addr_t msi_addr = virt_to_phys(pcie); - msg->address_lo = lower_32_bits(msi_msg); - msg->address_hi = upper_32_bits(msi_msg); - msg->data = data->irq; + msg->address_lo = lower_32_bits(msi_addr); + msg->address_hi = upper_32_bits(msi_addr); + msg->data = data->hwirq; } static int advk_msi_set_affinity(struct irq_data *irq_data, @@ -1193,6 +1311,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data, return -EINVAL; } +static void advk_msi_irq_mask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 mask; + + raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); + mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + mask |= BIT(hwirq); + advk_writel(pcie, mask, PCIE_MSI_MASK_REG); + raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); +} + +static void advk_msi_irq_unmask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 mask; + + raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); + mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + mask &= ~BIT(hwirq); + advk_writel(pcie, mask, PCIE_MSI_MASK_REG); + raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); +} + +static void advk_msi_top_irq_mask(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void advk_msi_top_irq_unmask(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip advk_msi_bottom_irq_chip = { + .name = "MSI", + .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, + .irq_set_affinity = advk_msi_set_affinity, + .irq_mask = advk_msi_irq_mask, + .irq_unmask = advk_msi_irq_unmask, +}; + static int advk_msi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) @@ -1201,19 +1367,15 @@ static int 
advk_msi_irq_domain_alloc(struct irq_domain *domain, int hwirq, i; mutex_lock(&pcie->msi_used_lock); - hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, - 0, nr_irqs, 0); - if (hwirq >= MSI_IRQ_NUM) { - mutex_unlock(&pcie->msi_used_lock); - return -ENOSPC; - } - - bitmap_set(pcie->msi_used, hwirq, nr_irqs); + hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, + order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); + if (hwirq < 0) + return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, - &pcie->msi_bottom_irq_chip, + &advk_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); @@ -1227,7 +1389,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain, struct advk_pcie *pcie = domain->host_data; mutex_lock(&pcie->msi_used_lock); - bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); } @@ -1269,7 +1431,6 @@ static int advk_pcie_irq_map(struct irq_domain *h, { struct advk_pcie *pcie = h->host_data; - advk_pcie_irq_mask(irq_get_irq_data(virq)); irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &pcie->irq_chip, handle_level_irq); @@ -1283,37 +1444,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; +static struct irq_chip advk_msi_irq_chip = { + .name = "advk-MSI", + .irq_mask = advk_msi_top_irq_mask, + .irq_unmask = advk_msi_top_irq_unmask, +}; + +static struct msi_domain_info advk_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .chip = &advk_msi_irq_chip, +}; + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; - struct irq_chip *bottom_ic, *msi_ic; - struct msi_domain_info *msi_di; - phys_addr_t msi_msg_phys; + raw_spin_lock_init(&pcie->msi_irq_lock); mutex_init(&pcie->msi_used_lock); - bottom_ic = &pcie->msi_bottom_irq_chip; - - bottom_ic->name = "MSI"; - bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; - bottom_ic->irq_set_affinity = advk_msi_set_affinity; - - msi_ic = &pcie->msi_irq_chip; - msi_ic->name = "advk-MSI"; - - msi_di = &pcie->msi_domain_info; - msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI; - msi_di->chip = msi_ic; - - msi_msg_phys = virt_to_phys(&pcie->msi_msg); - - advk_writel(pcie, lower_32_bits(msi_msg_phys), - PCIE_MSI_ADDR_LOW_REG); - advk_writel(pcie, upper_32_bits(msi_msg_phys), - PCIE_MSI_ADDR_HIGH_REG); - pcie->msi_inner_domain = irq_domain_add_linear(NULL, MSI_IRQ_NUM, &advk_msi_domain_ops, pcie); @@ -1321,8 +1470,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) return -ENOMEM; pcie->msi_domain = - pci_msi_create_irq_domain(of_node_to_fwnode(node), - msi_di, pcie->msi_inner_domain); + pci_msi_create_irq_domain(dev_fwnode(dev), + &advk_msi_domain_info, + pcie->msi_inner_domain); if (!pcie->msi_domain) { irq_domain_remove(pcie->msi_inner_domain); return -ENOMEM; @@ -1363,7 +1513,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) } irq_chip->irq_mask = advk_pcie_irq_mask; - irq_chip->irq_mask_ack = advk_pcie_irq_mask; irq_chip->irq_unmask = advk_pcie_irq_unmask; pcie->irq_domain = @@ -1385,10 +1534,73 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) irq_domain_remove(pcie->irq_domain); } +static 
struct irq_chip advk_rp_irq_chip = { + .name = "advk-RP", +}; + +static int advk_pcie_rp_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct advk_pcie *pcie = h->host_data; + + irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq); + irq_set_chip_data(virq, pcie); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { + .map = advk_pcie_rp_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) +{ + pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, + &advk_pcie_rp_irq_domain_ops, + pcie); + if (!pcie->rp_irq_domain) { + dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); + return -ENOMEM; + } + + return 0; +} + +static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie) +{ + irq_domain_remove(pcie->rp_irq_domain); +} + +static void advk_pcie_handle_pme(struct advk_pcie *pcie) +{ + u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16; + + advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); + + /* + * PCIE_MSG_LOG_REG contains the last inbound message, so store + * the requester ID only when PME was not asserted yet. + * Also do not trigger PME interrupt when PME is still asserted. + */ + if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) { + pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME); + + /* + * Trigger PME interrupt only if PMEIE bit in Root Control is set. + * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0. + */ + if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE)) + return; + + if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); + } +} + static void advk_pcie_handle_msi(struct advk_pcie *pcie) { u32 msi_val, msi_mask, msi_status, msi_idx; - u16 msi_data; msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); @@ -1398,13 +1610,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) if (!(BIT(msi_idx) & msi_status)) continue; - /* - * msi_idx contains bits [4:0] of the msi_data and msi_data - * contains 16bit MSI interrupt number - */ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); - msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK; - generic_handle_irq(msi_data); + if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx); } advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, @@ -1425,6 +1633,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie) isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + /* Process PME interrupt as the first one to do not miss PME requester id */ + if (isr0_status & PCIE_MSG_PM_PME_MASK) + advk_pcie_handle_pme(pcie); + + /* Process ERR interrupt */ + if (isr0_status & PCIE_ISR0_ERR_MASK) { + advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG); + + /* + * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use + * PCIe interrupt 0 + */ + if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n"); + } + /* Process MSI interrupts */ if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) advk_pcie_handle_msi(pcie); @@ -1437,7 +1661,9 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie) advk_writel(pcie, 
PCIE_ISR1_INTX_ASSERT(i), PCIE_ISR1_REG); - generic_handle_domain_irq(pcie->irq_domain, i); + if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL) + dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n", + (char)i + 'A'); } } @@ -1458,7 +1684,22 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie) +static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct advk_pcie *pcie = dev->bus->sysdata; + + /* + * Emulated root bridge has its own emulated irq chip and irq domain. + * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and + * hwirq for irq_create_mapping() is indexed from zero. + */ + if (pci_is_root_bus(dev->bus)) + return irq_create_mapping(pcie->rp_irq_domain, pin - 1); + else + return of_irq_parse_and_map_pci(dev, slot, pin); +} + +static void advk_pcie_disable_phy(struct advk_pcie *pcie) { phy_power_off(pcie->phy); phy_exit(pcie->phy); @@ -1482,9 +1723,7 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie) } ret = phy_power_on(pcie->phy); - if (ret == -EOPNOTSUPP) { - dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n"); - } else if (ret) { + if (ret) { phy_exit(pcie->phy); return ret; } @@ -1667,11 +1906,21 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } + ret = advk_pcie_init_rp_irq_domain(pcie); + if (ret) { + dev_err(dev, "Failed to initialize irq\n"); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return ret; + } + bridge->sysdata = pcie; bridge->ops = &advk_pcie_ops; + bridge->map_irq = advk_pcie_map_irq; ret = pci_host_probe(bridge); if (ret < 0) { + advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); return ret; @@ -1720,6 +1969,7 @@ static int advk_pcie_remove(struct platform_device *pdev) advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); /* Remove IRQ domains */ + advk_pcie_remove_rp_irq_domain(pcie); advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 88980a44461d..0cfd9d5a497c 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -103,13 +103,6 @@ #define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 #define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 -/* Defines for PCI configuration command register */ -#define PCI_CONF_ENABLE BIT(31) -#define PCI_CONF_WHERE(r) ((r) & 0xFC) -#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) -#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) -#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) - /** * struct faraday_pci_variant - encodes IP block differences * @cascaded_irq: this host has cascaded IRQs from an interrupt controller @@ -190,11 +183,8 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, unsigned int fn, int config, int size, u32 *value) { - writel(PCI_CONF_BUS(bus_number) | - PCI_CONF_DEVICE(PCI_SLOT(fn)) | - PCI_CONF_FUNCTION(PCI_FUNC(fn)) | - PCI_CONF_WHERE(config) | - PCI_CONF_ENABLE, + writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn), + PCI_FUNC(fn), config), p->base + FTPCI_CONFIG); *value = readl(p->base + FTPCI_DATA); @@ -225,11 +215,8 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, { int ret = PCIBIOS_SUCCESSFUL; - writel(PCI_CONF_BUS(bus_number) | - PCI_CONF_DEVICE(PCI_SLOT(fn)) | - 
PCI_CONF_FUNCTION(PCI_FUNC(fn)) | - PCI_CONF_WHERE(config) | - PCI_CONF_ENABLE, + writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn), + PCI_FUNC(fn), config), p->base + FTPCI_CONFIG); switch (size) { diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 20ea2ee330b8..e7c6f6629e7c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -92,6 +92,13 @@ static enum pci_protocol_version_t pci_protocol_versions[] = { #define SLOT_NAME_SIZE 11 /* + * Size of requestor for VMbus; the value is based on the observation + * that having more than one request outstanding is 'rare', and so 64 + * should be generous in ensuring that we don't ever run out. + */ +#define HV_PCI_RQSTOR_SIZE 64 + +/* * Message Types */ @@ -604,17 +611,137 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) return cfg->vector; } -static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) +static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) { - msi_entry->address.as_uint32 = msi_desc->msg.address_lo; - msi_entry->data.as_uint32 = msi_desc->msg.data; + int ret = pci_msi_prepare(domain, dev, nvec, info); + + /* + * By using the interrupt remapper in the hypervisor IOMMU, contiguous + * CPU vectors is not needed for multi-MSI + */ + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + + return ret; } -static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *info) +/** + * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current + * affinity. + * @data: Describes the IRQ + * + * Build new a destination for the MSI and make a hypercall to + * update the Interrupt Redirection Table. "Device Logical ID" + * is built out of this PCI bus's instance GUID and the function + * number of the device. + */ +static void hv_arch_irq_unmask(struct irq_data *data) { - return pci_msi_prepare(domain, dev, nvec, info); + struct msi_desc *msi_desc = irq_data_get_msi_desc(data); + struct hv_retarget_device_interrupt *params; + struct tran_int_desc *int_desc; + struct hv_pcibus_device *hbus; + const struct cpumask *dest; + cpumask_var_t tmp; + struct pci_bus *pbus; + struct pci_dev *pdev; + unsigned long flags; + u32 var_size = 0; + int cpu, nr_bank; + u64 res; + + dest = irq_data_get_effective_affinity_mask(data); + pdev = msi_desc_to_pci_dev(msi_desc); + pbus = pdev->bus; + hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + int_desc = data->chip_data; + + spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); + + params = &hbus->retarget_msi_interrupt_params; + memset(params, 0, sizeof(*params)); + params->partition_id = HV_PARTITION_ID_SELF; + params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; + params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff; + params->int_entry.msi_entry.data.as_uint32 = int_desc->data; + params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | + (hbus->hdev->dev_instance.b[4] << 16) | + (hbus->hdev->dev_instance.b[7] << 8) | + (hbus->hdev->dev_instance.b[6] & 0xf8) | + PCI_FUNC(pdev->devfn); + params->int_target.vector = hv_msi_get_int_vector(data); + + /* + * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by + * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a + * spurious interrupt storm. 
Not doing so does not seem to have a + * negative effect (yet?). + */ + + if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { + /* + * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the + * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides + * with >64 VP support. + * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED + * is not sufficient for this hypercall. + */ + params->int_target.flags |= + HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; + + if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) { + res = 1; + goto exit_unlock; + } + + cpumask_and(tmp, dest, cpu_online_mask); + nr_bank = cpumask_to_vpset(¶ms->int_target.vp_set, tmp); + free_cpumask_var(tmp); + + if (nr_bank <= 0) { + res = 1; + goto exit_unlock; + } + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_bank_mask + * does). + */ + var_size = 1 + nr_bank; + } else { + for_each_cpu_and(cpu, dest, cpu_online_mask) { + params->int_target.vp_mask |= + (1ULL << hv_cpu_number_to_vp_number(cpu)); + } + } + + res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), + params, NULL); + +exit_unlock: + spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); + + /* + * During hibernation, when a CPU is offlined, the kernel tries + * to move the interrupt to the remaining CPUs that haven't + * been offlined yet. In this case, the below hv_do_hypercall() + * always fails since the vmbus channel has been closed: + * refer to cpu_disable_common() -> fixup_irqs() -> + * irq_migrate_all_off_this_cpu() -> migrate_one_irq(). + * + * Suppress the error message for hibernation because the failure + * during hibernation does not matter (at this time all the devices + * have been frozen). Note: the correct affinity info is still updated + * into the irqdata data structure in migrate_one_irq() -> + * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM + * resumes, hv_pci_restore_msi_state() is able to correctly restore + * the interrupt with the correct affinity. + */ + if (!hv_result_success(res) && hbus->state != hv_pcibus_removing) + dev_err(&hbus->hdev->device, + "%s() failed: %#llx", __func__, res); } #elif defined(CONFIG_ARM64) /* @@ -651,14 +778,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd) return irqd->parent_data->hwirq; } -static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) -{ - msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) | - msi_desc->msg.address_lo; - msi_entry->data = msi_desc->msg.data; -} - /* * @nr_bm_irqs: Indicates the number of IRQs that were allocated from * the bitmap. @@ -839,6 +958,12 @@ static struct irq_domain *hv_pci_get_root_domain(void) { return hv_msi_gic_irq_domain; } + +/* + * SPIs are used for interrupts of PCI devices and SPIs is managed via GICD + * registers which Hyper-V already supports, so no hypercall needed. 
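hv_arch_irq_unmask() issues HVCALL_RETARGET_INTERRUPT as a variable-sized hypercall: the variable-header size (one word for valid_bank_mask plus one per bank returned by cpumask_to_vpset()) is encoded starting at bit 17 of the control word, which is what "| (var_size << 17)" expresses. A back-of-envelope sketch, assuming the call code 0x7e from the kernel's hyperv-tlfs.h:

```c
#include <stdint.h>
#include <stdio.h>

#define HVCALL_RETARGET_INTERRUPT 0x7e /* assumed, per hyperv-tlfs.h */

int main(void)
{
	int nr_bank = 2;                 /* banks from cpumask_to_vpset() */
	uint64_t var_size = 1 + nr_bank; /* + valid_bank_mask; vp_set.format
	                                  * does not count                 */
	uint64_t control = HVCALL_RETARGET_INTERRUPT | (var_size << 17);

	printf("hypercall control word: %#llx\n",
	       (unsigned long long)control);
	return 0;
}
```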
+ */ +static void hv_arch_irq_unmask(struct irq_data *data) { } #endif /* CONFIG_ARM64 */ /** @@ -856,11 +981,7 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp, { struct hv_pci_compl *comp_pkt = context; - if (resp_packet_size >= offsetofend(struct pci_response, status)) - comp_pkt->completion_status = resp->status; - else - comp_pkt->completion_status = -1; - + comp_pkt->completion_status = resp->status; complete(&comp_pkt->host_event); } @@ -1400,6 +1521,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; + if (!int_desc->vector_count) { + kfree(int_desc); + return; + } memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = @@ -1407,7 +1532,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, int_pkt->wslot.slot = hpdev->desc.win_slot.slot; int_pkt->int_desc = *int_desc; vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), - (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); + 0, VM_PKT_DATA_INBAND, 0); kfree(int_desc); } @@ -1456,119 +1581,9 @@ static void hv_irq_mask(struct irq_data *data) irq_chip_mask_parent(data); } -/** - * hv_irq_unmask() - "Unmask" the IRQ by setting its current - * affinity. - * @data: Describes the IRQ - * - * Build new a destination for the MSI and make a hypercall to - * update the Interrupt Redirection Table. "Device Logical ID" - * is built out of this PCI bus's instance GUID and the function - * number of the device. - */ static void hv_irq_unmask(struct irq_data *data) { - struct msi_desc *msi_desc = irq_data_get_msi_desc(data); - struct hv_retarget_device_interrupt *params; - struct hv_pcibus_device *hbus; - struct cpumask *dest; - cpumask_var_t tmp; - struct pci_bus *pbus; - struct pci_dev *pdev; - unsigned long flags; - u32 var_size = 0; - int cpu, nr_bank; - u64 res; - - dest = irq_data_get_effective_affinity_mask(data); - pdev = msi_desc_to_pci_dev(msi_desc); - pbus = pdev->bus; - hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); - - spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); - - params = &hbus->retarget_msi_interrupt_params; - memset(params, 0, sizeof(*params)); - params->partition_id = HV_PARTITION_ID_SELF; - params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; - hv_set_msi_entry_from_desc(¶ms->int_entry.msi_entry, msi_desc); - params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | - (hbus->hdev->dev_instance.b[4] << 16) | - (hbus->hdev->dev_instance.b[7] << 8) | - (hbus->hdev->dev_instance.b[6] & 0xf8) | - PCI_FUNC(pdev->devfn); - params->int_target.vector = hv_msi_get_int_vector(data); - - /* - * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by - * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a - * spurious interrupt storm. Not doing so does not seem to have a - * negative effect (yet?). - */ - - if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) { - /* - * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the - * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides - * with >64 VP support. - * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED - * is not sufficient for this hypercall. 
- */ - params->int_target.flags |= - HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; - - if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) { - res = 1; - goto exit_unlock; - } - - cpumask_and(tmp, dest, cpu_online_mask); - nr_bank = cpumask_to_vpset(¶ms->int_target.vp_set, tmp); - free_cpumask_var(tmp); - - if (nr_bank <= 0) { - res = 1; - goto exit_unlock; - } - - /* - * var-sized hypercall, var-size starts after vp_mask (thus - * vp_set.format does not count, but vp_set.valid_bank_mask - * does). - */ - var_size = 1 + nr_bank; - } else { - for_each_cpu_and(cpu, dest, cpu_online_mask) { - params->int_target.vp_mask |= - (1ULL << hv_cpu_number_to_vp_number(cpu)); - } - } - - res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), - params, NULL); - -exit_unlock: - spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); - - /* - * During hibernation, when a CPU is offlined, the kernel tries - * to move the interrupt to the remaining CPUs that haven't - * been offlined yet. In this case, the below hv_do_hypercall() - * always fails since the vmbus channel has been closed: - * refer to cpu_disable_common() -> fixup_irqs() -> - * irq_migrate_all_off_this_cpu() -> migrate_one_irq(). - * - * Suppress the error message for hibernation because the failure - * during hibernation does not matter (at this time all the devices - * have been frozen). Note: the correct affinity info is still updated - * into the irqdata data structure in migrate_one_irq() -> - * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM - * resumes, hv_pci_restore_msi_state() is able to correctly restore - * the interrupt with the correct affinity. - */ - if (!hv_result_success(res) && hbus->state != hv_pcibus_removing) - dev_err(&hbus->hdev->device, - "%s() failed: %#llx", __func__, res); + hv_arch_irq_unmask(data); if (data->parent_data->chip->irq_unmask) irq_chip_unmask_parent(data); @@ -1587,19 +1602,24 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; + if (resp_packet_size < sizeof(*int_resp)) { + comp_pkt->comp_pkt.completion_status = -1; + goto out; + } comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; +out: complete(&comp_pkt->comp_pkt.host_event); } static u32 hv_compose_msi_req_v1( - struct pci_create_interrupt *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + struct pci_create_interrupt *int_pkt, const struct cpumask *affinity, + u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; /* @@ -1615,21 +1635,21 @@ static u32 hv_compose_msi_req_v1( * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten * by subsequent retarget in hv_irq_unmask(). 
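hv_pci_compose_compl() above now rejects completion packets shorter than the structure it is about to read, instead of trusting resp->status unconditionally. A stripped-down analogue of that bounds check; the struct layout below is hypothetical, not the real VMbus message:

```c
#include <stdint.h>
#include <stdio.h>

struct pci_response { int32_t status; };

struct pci_create_int_response {
	struct pci_response response;
	uint32_t reserved;
	uint64_t int_desc[2]; /* stand-in for struct tran_int_desc */
};

static int32_t completion_status(const void *buf, size_t resp_packet_size)
{
	if (resp_packet_size < sizeof(struct pci_create_int_response))
		return -1; /* short packet: fail instead of reading garbage */
	return ((const struct pci_create_int_response *)buf)->response.status;
}

int main(void)
{
	uint8_t short_pkt[4] = { 0 };

	printf("short packet -> status %d\n",
	       completion_status(short_pkt, sizeof(short_pkt)));
	return 0;
}
```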
*/ -static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) +static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) { return cpumask_first_and(affinity, cpu_online_mask); } static u32 hv_compose_msi_req_v2( - struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity, + u32 slot, u8 vector, u8 vector_count) { int cpu; int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1640,8 +1660,8 @@ static u32 hv_compose_msi_req_v2( } static u32 hv_compose_msi_req_v3( - struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, - u32 slot, u32 vector) + struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity, + u32 slot, u32 vector, u8 vector_count) { int cpu; @@ -1649,7 +1669,7 @@ static u32 hv_compose_msi_req_v3( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1677,9 +1697,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; - struct cpumask *dest; + const struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; + struct msi_desc *msi_desc; + u8 vector, vector_count; struct { struct pci_packet pci_pkt; union { @@ -1688,11 +1710,21 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct pci_create_interrupt3 v3; } int_pkts; } __packed ctxt; - + u64 trans_id; u32 size; int ret; - pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + /* Reuse the previous allocation */ + if (data->chip_data) { + int_desc = data->chip_data; + msg->address_hi = int_desc->address >> 32; + msg->address_lo = int_desc->address & 0xffffffff; + msg->data = int_desc->data; + return; + } + + msi_desc = irq_data_get_msi_desc(data); + pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1701,17 +1733,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (!hpdev) goto return_null_message; - /* Free any previous message that might have already been composed. */ - if (data->chip_data) { - int_desc = data->chip_data; - data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); - } - int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference; + if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) { + /* + * If this is not the first MSI of Multi MSI, we already have + * a mapping. Can exit early. 
+ */ + if (msi_desc->irq != data->irq) { + data->chip_data = int_desc; + int_desc->address = msi_desc->msg.address_lo | + (u64)msi_desc->msg.address_hi << 32; + int_desc->data = msi_desc->msg.data + + (data->irq - msi_desc->irq); + msg->address_hi = msi_desc->msg.address_hi; + msg->address_lo = msi_desc->msg.address_lo; + msg->data = int_desc->data; + put_pcichild(hpdev); + return; + } + /* + * The vector we select here is a dummy value. The correct + * value gets sent to the hypervisor in unmask(). This needs + * to be aligned with the count, and also not zero. Multi-msi + * is powers of 2 up to 32, so 32 will always work here. + */ + vector = 32; + vector_count = msi_desc->nvec_used; + } else { + vector = hv_msi_get_int_vector(data); + vector_count = 1; + } + memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; @@ -1722,7 +1777,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; case PCI_PROTOCOL_VERSION_1_2: @@ -1730,14 +1786,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; default: @@ -1750,10 +1808,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) goto free_int_desc; } - ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, - size, (unsigned long)&ctxt.pci_pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts, + size, (unsigned long)&ctxt.pci_pkt, + &trans_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { dev_err(&hbus->hdev->device, "Sending request for interrupt failed: 0x%x", @@ -1832,6 +1890,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) enable_tasklet: tasklet_enable(&channel->callback_event); + /* + * The completion packet on the stack becomes invalid after 'return'; + * remove the ID from the VMbus requestor if the identifier is still + * mapped to/associated with the packet. (The identifier could have + * been 're-used', i.e., already removed and (re-)mapped.) + * + * Cf. hv_pci_onchannelcallback(). + */ + vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt); free_int_desc: kfree(int_desc); drop_reference: @@ -2079,12 +2146,17 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) } } if (high_size <= 1 && low_size <= 1) { - /* Set the memory enable bit. */ - _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, - &command); - command |= PCI_COMMAND_MEMORY; - _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, - command); + /* + * No need to set the PCI_COMMAND_MEMORY bit as + * the core PCI driver doesn't require the bit + * to be pre-set. Actually here we intentionally + * keep the bit off so that the PCI BAR probing + * in the core PCI driver doesn't cause Hyper-V + * to unnecessarily unmap/map the virtual BARs + * from/to the physical BARs multiple times. + * This reduces the VM boot time significantly + * if the BAR sizes are huge. 
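For Multi-MSI, the first interrupt of a block now sends one request with vector_count = nvec_used and a dummy vector of 32, and each later interrupt reuses the stored message with its data bumped by the IRQ offset. A quick standalone check of that arithmetic, with hypothetical base values:

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int base_irq = 100, base_data = 0x40; /* hypothetical */

	/* 32 is "aligned with the count, and also not zero" for every
	 * legal Multi-MSI block size (powers of 2 up to 32): */
	for (unsigned int count = 1; count <= 32; count <<= 1)
		assert(32 % count == 0);

	/* Secondary IRQs of a 4-vector block reuse the first message and
	 * only bump the data value, as in hv_compose_msi_msg(): */
	for (unsigned int irq = base_irq; irq < base_irq + 4; irq++)
		printf("irq %u -> data %#x\n", irq,
		       base_data + (irq - base_irq));
	return 0;
}
```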
+ */ break; } } @@ -2155,8 +2227,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. + */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } @@ -2211,12 +2292,14 @@ static void q_resource_requirements(void *context, struct pci_response *resp, struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; + s32 status; int i; - if (resp->status < 0) { + status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status; + if (status < 0) { dev_err(&completion->hpdev->hbus->hdev->device, "query resource requirements failed: %x\n", - resp->status); + status); } else { for (i = 0; i < PCI_STD_NUM_BARS; i++) { completion->hpdev->probed_bar[i] = @@ -2640,7 +2723,7 @@ static void hv_eject_device_work(struct work_struct *work) ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, - sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, + sizeof(*ejct_pkt), 0, VM_PKT_DATA_INBAND, 0); /* For the get_pcichild() in hv_pci_eject_device() */ @@ -2687,8 +2770,9 @@ static void hv_pci_onchannelcallback(void *context) const int packet_size = 0x100; int ret; struct hv_pcibus_device *hbus = context; + struct vmbus_channel *chan = hbus->hdev->channel; u32 bytes_recvd; - u64 req_id; + u64 req_id, req_addr; struct vmpacket_descriptor *desc; unsigned char *buffer; int bufferlen = packet_size; @@ -2700,14 +2784,15 @@ static void hv_pci_onchannelcallback(void *context) struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; + unsigned long flags; buffer = kmalloc(bufferlen, GFP_ATOMIC); if (!buffer) return; while (1) { - ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer, - bufferlen, &bytes_recvd, &req_id); + ret = vmbus_recvpacket_raw(chan, buffer, bufferlen, + &bytes_recvd, &req_id); if (ret == -ENOBUFS) { kfree(buffer); @@ -2734,15 +2819,29 @@ static void hv_pci_onchannelcallback(void *context) switch (desc->type) { case VM_PKT_COMP: + lock_requestor(chan, flags); + req_addr = __vmbus_request_addr_match(chan, req_id, + VMBUS_RQST_ADDR_ANY); + if (req_addr == VMBUS_RQST_ERROR) { + unlock_requestor(chan, flags); + dev_err(&hbus->hdev->device, + "Invalid transaction ID %llx\n", + req_id); + break; + } + comp_packet = (struct pci_packet *)req_addr; + response = (struct pci_response *)buffer; /* - * The host is trusted, and thus it's safe to interpret - * this transaction ID as a pointer. + * Call ->completion_func() within the critical section to make + * sure that the packet pointer is still valid during the call: + * here 'valid' means that there's a task still waiting for the + * completion, and that the packet data is still on the waiting + * task's stack. Cf. hv_compose_msi_msg(). 
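The VM_PKT_COMP path above stops interpreting the host-supplied transaction ID as a kernel pointer and instead resolves it through the channel's requestor table while holding the requestor lock. The real API is vmbus_next_request_id()/__vmbus_request_addr_match(); the toy model below only illustrates the idea of an index-validated, one-shot ID-to-address table:

```c
#include <stdint.h>
#include <stdio.h>

#define RQSTOR_SIZE 64         /* cf. HV_PCI_RQSTOR_SIZE */
#define RQST_ERROR  UINT64_MAX

static void *rqst_table[RQSTOR_SIZE];

static uint64_t request_id_for(void *pkt) /* on send */
{
	for (uint64_t i = 0; i < RQSTOR_SIZE; i++)
		if (!rqst_table[i]) {
			rqst_table[i] = pkt;
			return i;
		}
	return RQST_ERROR; /* table exhausted */
}

static uint64_t addr_for_request_id(uint64_t id) /* on completion */
{
	if (id >= RQSTOR_SIZE || !rqst_table[id])
		return RQST_ERROR; /* invalid or stale transaction ID */

	uint64_t addr = (uint64_t)(uintptr_t)rqst_table[id];
	rqst_table[id] = NULL; /* one-shot mapping */
	return addr;
}

int main(void)
{
	int pkt;
	uint64_t id = request_id_for(&pkt);
	int valid = addr_for_request_id(id) != RQST_ERROR; /* first lookup */
	int stale = addr_for_request_id(id) != RQST_ERROR; /* now removed */

	printf("id=%llu valid=%d stale=%d\n",
	       (unsigned long long)id, valid, stale);
	return 0;
}
```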
*/ - comp_packet = (struct pci_packet *)req_id; - response = (struct pci_response *)buffer; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_recvd); + unlock_requestor(chan, flags); break; case VM_PKT_DATA_INBAND: @@ -2752,7 +2851,8 @@ static void hv_pci_onchannelcallback(void *context) case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; - if (bytes_recvd < + if (bytes_recvd < sizeof(*bus_rel) || + bytes_recvd < struct_size(bus_rel, func, bus_rel->device_count)) { dev_err(&hbus->hdev->device, @@ -2766,7 +2866,8 @@ static void hv_pci_onchannelcallback(void *context) case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; - if (bytes_recvd < + if (bytes_recvd < sizeof(*bus_rel2) || + bytes_recvd < struct_size(bus_rel2, func, bus_rel2->device_count)) { dev_err(&hbus->hdev->device, @@ -2780,6 +2881,11 @@ static void hv_pci_onchannelcallback(void *context) case PCI_EJECT: dev_message = (struct pci_dev_incoming *)buffer; + if (bytes_recvd < sizeof(*dev_message)) { + dev_err(&hbus->hdev->device, + "eject message too small\n"); + break; + } hpdev = get_pcichild_wslot(hbus, dev_message->wslot.slot); if (hpdev) { @@ -2791,6 +2897,11 @@ static void hv_pci_onchannelcallback(void *context) case PCI_INVALIDATE_BLOCK: inval = (struct pci_dev_inval_block *)buffer; + if (bytes_recvd < sizeof(*inval)) { + dev_err(&hbus->hdev->device, + "invalidate message too small\n"); + break; + } hpdev = get_pcichild_wslot(hbus, inval->wslot.slot); if (hpdev) { @@ -3395,6 +3506,15 @@ static int hv_pci_probe(struct hv_device *hdev, hbus->bridge->domain_nr = dom; #ifdef CONFIG_X86 hbus->sysdata.domain = dom; +#elif defined(CONFIG_ARM64) + /* + * Set the PCI bus parent to be the corresponding VMbus + * device. Then the VMbus device will be assigned as the + * ACPI companion in pcibios_root_bridge_prepare() and + * pci_dma_configure() will propagate device coherence + * information to devices created on the bus. + */ + hbus->sysdata.parent = hdev->device.parent; #endif hbus->hdev = hdev; @@ -3410,6 +3530,10 @@ static int hv_pci_probe(struct hv_device *hdev, goto free_dom; } + hdev->channel->next_request_id_callback = vmbus_next_request_id; + hdev->channel->request_addr_callback = vmbus_request_addr; + hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; + ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) @@ -3540,6 +3664,7 @@ free_bus: static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct vmbus_channel *chan = hdev->channel; struct { struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; @@ -3547,13 +3672,14 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev, *tmp; unsigned long flags; + u64 trans_id; int ret; /* * After the host sends the RESCIND_CHANNEL message, it doesn't * access the per-channel ringbuffer any longer. 
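The PCI_BUS_RELATIONS/PCI_BUS_RELATIONS2 handling above now checks the fixed header first and then the full struct_size() before believing device_count. A userspace sketch of the same two-step check; note the kernel's struct_size() is additionally overflow-safe, unlike this plain macro:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct func_desc { uint32_t wslot; };

struct bus_relations {
	uint32_t device_count;
	struct func_desc func[]; /* flexible array member */
};

#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

static bool bus_relations_ok(const struct bus_relations *rel, size_t bytes)
{
	/* header (incl. device_count) present, then the advertised array */
	return bytes >= sizeof(*rel) &&
	       bytes >= struct_size(rel, func, rel->device_count);
}

int main(void)
{
	uint32_t buf[3] = { 2 }; /* device_count = 2, then two wslots */
	struct bus_relations *rel = (struct bus_relations *)buf;

	printf("full: %d, truncated: %d\n",
	       bus_relations_ok(rel, sizeof(buf)),
	       bus_relations_ok(rel, sizeof(buf) - 4));
	return 0;
}
```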
*/ - if (hdev->channel->rescind) + if (chan->rescind) return 0; if (!keep_devs) { @@ -3590,16 +3716,26 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) pkt.teardown_packet.compl_ctxt = &comp_pkt; pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; - ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, - sizeof(struct pci_message), - (unsigned long)&pkt.teardown_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message, + sizeof(struct pci_message), + (unsigned long)&pkt.teardown_packet, + &trans_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret; - if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) + if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) { + /* + * The completion packet on the stack becomes invalid after + * 'return'; remove the ID from the VMbus requestor if the + * identifier is still mapped to/associated with the packet. + * + * Cf. hv_pci_onchannelcallback(). + */ + vmbus_request_addr_match(chan, trans_id, + (unsigned long)&pkt.teardown_packet); return -ETIMEDOUT; + } return 0; } @@ -3740,6 +3876,10 @@ static int hv_pci_resume(struct hv_device *hdev) hbus->state = hv_pcibus_init; + hdev->channel->next_request_id_callback = vmbus_next_request_id; + hdev->channel->request_addr_callback = vmbus_request_addr; + hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; + ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 48169b1e3817..05c50408f13b 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -9,6 +9,8 @@ #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_ids.h> +#include <linux/pci-acpi.h> +#include <linux/pci-ecam.h> #include "../pci.h" @@ -18,24 +20,37 @@ #define DEV_PCIE_PORT_2 0x7a29 #define DEV_LS2K_APB 0x7a02 -#define DEV_LS7A_CONF 0x7a10 +#define DEV_LS7A_GMAC 0x7a03 +#define DEV_LS7A_DC1 0x7a06 #define DEV_LS7A_LPC 0x7a0c +#define DEV_LS7A_AHCI 0x7a08 +#define DEV_LS7A_CONF 0x7a10 +#define DEV_LS7A_GNET 0x7a13 +#define DEV_LS7A_EHCI 0x7a14 +#define DEV_LS7A_DC2 0x7a36 +#define DEV_LS7A_HDMI 0x7a37 #define FLAG_CFG0 BIT(0) #define FLAG_CFG1 BIT(1) #define FLAG_DEV_FIX BIT(2) +#define FLAG_DEV_HIDDEN BIT(3) + +struct loongson_pci_data { + u32 flags; + struct pci_ops *ops; +}; struct loongson_pci { void __iomem *cfg0_base; void __iomem *cfg1_base; struct platform_device *pdev; - u32 flags; + const struct loongson_pci_data *data; }; /* Fixup wrong class code in PCIe bridges */ static void bridge_class_quirk(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_0, bridge_class_quirk); @@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk); -static void __iomem *cfg1_map(struct loongson_pci *priv, int bus, - unsigned int devfn, int where) +static void loongson_pci_pin_quirk(struct pci_dev *pdev) { - unsigned long addroff = 0x0; + pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_DC1, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_DC2, loongson_pci_pin_quirk); 
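loongson_pci_pin_quirk() above synthesizes the Interrupt Pin register from the function number, since the LS7A hardware leaves it unset: "pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3)" cycles functions through INTA..INTD. A quick table of what that yields (the remaining fixup declarations for the affected device IDs follow below):

```c
#include <stdio.h>

#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	for (unsigned int devfn = 0; devfn < 8; devfn++) {
		unsigned int pin = 1 + (PCI_FUNC(devfn) & 3);

		printf("function %u -> pin %u (INT%c)\n",
		       PCI_FUNC(devfn), pin, 'A' + pin - 1);
	}
	return 0;
}
```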
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_GMAC, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_AHCI, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_EHCI, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_GNET, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_HDMI, loongson_pci_pin_quirk); + +static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) +{ + struct pci_config_window *cfg; - if (bus != 0) - addroff |= BIT(28); /* Type 1 Access */ - addroff |= (where & 0xff) | ((where & 0xf00) << 16); - addroff |= (bus << 16) | (devfn << 8); - return priv->cfg1_base + addroff; + if (acpi_disabled) + return (struct loongson_pci *)(bus->sysdata); + + cfg = bus->sysdata; + return (struct loongson_pci *)(cfg->priv); } -static void __iomem *cfg0_map(struct loongson_pci *priv, int bus, - unsigned int devfn, int where) +static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus, + unsigned int devfn, int where) { unsigned long addroff = 0x0; + unsigned char busnum = bus->number; - if (bus != 0) + if (!pci_is_root_bus(bus)) { addroff |= BIT(24); /* Type 1 Access */ - addroff |= (bus << 16) | (devfn << 8) | where; + addroff |= (busnum << 16); + } + addroff |= (devfn << 8) | where; return priv->cfg0_base + addroff; } -static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) +static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus, + unsigned int devfn, int where) { + unsigned long addroff = 0x0; unsigned char busnum = bus->number; - struct pci_host_bridge *bridge = pci_find_host_bridge(bus); - struct loongson_pci *priv = pci_host_bridge_priv(bridge); + + if (!pci_is_root_bus(bus)) { + addroff |= BIT(28); /* Type 1 Access */ + addroff |= (busnum << 16); + } + addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16); + return priv->cfg1_base + addroff; +} + +static bool pdev_may_exist(struct pci_bus *bus, unsigned int device, + unsigned int function) +{ + return !(pci_is_root_bus(bus) && + (device >= 9 && device <= 20) && (function > 0)); +} + +static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + unsigned int device = PCI_SLOT(devfn); + unsigned int function = PCI_FUNC(devfn); + struct loongson_pci *priv = pci_bus_to_loongson_pci(bus); /* * Do not read more than one device on the bus other than - * the host bus. For our hardware the root bus is always bus 0. + * the host bus. 
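cfg1_map() above folds the extended-config bits where[11:8] into address bits 27:24, keeps the bus number at bits 23:16, and uses BIT(28) as the Type 1 flag for non-root buses. The encoding as a standalone helper, exercised with illustrative values:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ul << (n))

static unsigned long cfg1_off(unsigned int bus, unsigned int devfn,
			      unsigned int where, int is_root_bus)
{
	unsigned long off = 0;

	if (!is_root_bus) {
		off |= BIT(28); /* Type 1 access */
		off |= bus << 16;
	}
	return off | (devfn << 8) | (where & 0xff) |
	       ((where & 0xf00) << 16); /* ext cfg bits -> addr[27:24] */
}

int main(void)
{
	/* bus 1, device 2 function 0, extended register 0x100 */
	printf("cfg1 offset: %#lx\n", cfg1_off(1, (2 << 3) | 0, 0x100, 0));
	return 0;
}
```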
*/ - if (priv->flags & FLAG_DEV_FIX && busnum != 0 && - PCI_SLOT(devfn) > 0) - return NULL; + if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) { + if (!pci_is_root_bus(bus) && (device > 0)) + return NULL; + } + + /* Don't access non-existent devices */ + if (priv->data->flags & FLAG_DEV_HIDDEN) { + if (!pdev_may_exist(bus, device, function)) + return NULL; + } /* CFG0 can only access standard space */ if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base) - return cfg0_map(priv, busnum, devfn, where); + return cfg0_map(priv, bus, devfn, where); /* CFG1 can access extended space */ if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base) - return cfg1_map(priv, busnum, devfn, where); + return cfg1_map(priv, bus, devfn, where); return NULL; } +#ifdef CONFIG_OF + static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; @@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return val; } -/* H/w only accept 32-bit PCI operations */ +/* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + +/* RS780/SR5690 only accept 32-bit PCI config operations */ +static struct pci_ops loongson_pci_ops32 = { + .map_bus = pci_loongson_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write32, }; +static const struct loongson_pci_data ls2k_pci_data = { + .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, + .ops = &loongson_pci_ops, +}; + +static const struct loongson_pci_data ls7a_pci_data = { + .flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, + .ops = &loongson_pci_ops, +}; + +static const struct loongson_pci_data rs780e_pci_data = { + .flags = FLAG_CFG0, + .ops = &loongson_pci_ops32, +}; + static const struct of_device_id loongson_pci_of_match[] = { { .compatible = "loongson,ls2k-pci", - .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), }, + .data = &ls2k_pci_data, }, { .compatible = "loongson,ls7a-pci", - .data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), }, + .data = &ls7a_pci_data, }, { .compatible = "loongson,rs780e-pci", - .data = (void *)(FLAG_CFG0), }, + .data = &rs780e_pci_data, }, {} }; @@ -193,20 +281,20 @@ static int loongson_pci_probe(struct platform_device *pdev) priv = pci_host_bridge_priv(bridge); priv->pdev = pdev; - priv->flags = (unsigned long)of_device_get_match_data(dev); + priv->data = of_device_get_match_data(dev); - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(dev, "missing mem resources for cfg0\n"); - return -EINVAL; + if (priv->data->flags & FLAG_CFG0) { + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + dev_err(dev, "missing mem resources for cfg0\n"); + else { + priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs); + if (IS_ERR(priv->cfg0_base)) + return PTR_ERR(priv->cfg0_base); + } } - priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs); - if (IS_ERR(priv->cfg0_base)) - return PTR_ERR(priv->cfg0_base); - - /* CFG1 is optional */ - if (priv->flags & FLAG_CFG1) { + if (priv->data->flags & FLAG_CFG1) { regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); @@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev) } bridge->sysdata = priv; - bridge->ops = &loongson_pci_ops; + bridge->ops = priv->data->ops; bridge->map_irq = loongson_map_irq; return pci_host_probe(bridge); @@ -232,3 
+320,41 @@ static struct platform_driver loongson_pci_driver = { .probe = loongson_pci_probe, }; builtin_platform_driver(loongson_pci_driver); + +#endif + +#ifdef CONFIG_ACPI + +static int loongson_pci_ecam_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct loongson_pci *priv; + struct loongson_pci_data *data; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + cfg->priv = priv; + data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN; + priv->data = data; + priv->cfg1_base = cfg->win - (cfg->busr.start << 16); + + return 0; +} + +const struct pci_ecam_ops loongson_pci_ecam_ops = { + .bus_shift = 16, + .init = loongson_pci_ecam_init, + .pci_ops = { + .map_bus = pci_loongson_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +#endif diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 71258ea3d35f..1ced73726a26 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio.h> @@ -32,8 +33,9 @@ #define PCIE_DEV_REV_OFF 0x0008 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define PCIE_SSDEV_ID_OFF 0x002c #define PCIE_CAP_PCIEXP 0x0060 -#define PCIE_HEADER_LOG_4_OFF 0x0128 +#define PCIE_CAP_PCIERR_OFF 0x0100 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) @@ -53,9 +55,10 @@ PCIE_CONF_ADDR_EN) #define PCIE_CONF_DATA_OFF 0x18fc #define PCIE_INT_CAUSE_OFF 0x1900 +#define PCIE_INT_UNMASK_OFF 0x1910 +#define PCIE_INT_INTX(i) BIT(24+i) #define PCIE_INT_PM_PME BIT(28) -#define PCIE_MASK_OFF 0x1910 -#define PCIE_MASK_ENABLE_INTS 0x0f000000 +#define PCIE_INT_ALL_MASK GENMASK(31, 0) #define PCIE_CTRL_OFF 0x1a00 #define PCIE_CTRL_X1_MODE 0x0001 #define PCIE_CTRL_RC_MODE BIT(1) @@ -64,6 +67,12 @@ #define PCIE_STAT_BUS 0xff00 #define PCIE_STAT_DEV 0x1f0000 #define PCIE_STAT_LINK_DOWN BIT(0) +#define PCIE_SSPL_OFF 0x1a0c +#define PCIE_SSPL_VALUE_SHIFT 0 +#define PCIE_SSPL_VALUE_MASK GENMASK(7, 0) +#define PCIE_SSPL_SCALE_SHIFT 8 +#define PCIE_SSPL_SCALE_MASK GENMASK(9, 8) +#define PCIE_SSPL_ENABLE BIT(16) #define PCIE_RC_RTSTA 0x1a14 #define PCIE_DEBUG_CTRL 0x1a60 #define PCIE_DEBUG_SOFT_RESET BIT(20) @@ -93,6 +102,7 @@ struct mvebu_pcie_port { void __iomem *base; u32 port; u32 lane; + bool is_x4; int devfn; unsigned int mem_target; unsigned int mem_attr; @@ -108,6 +118,11 @@ struct mvebu_pcie_port { struct mvebu_pcie_window iowin; u32 saved_pcie_stat; struct resource regs; + u8 slot_power_limit_value; + u8 slot_power_limit_scale; + struct irq_domain *intx_irq_domain; + raw_spinlock_t irq_lock; + int intx_irq; }; static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) @@ -233,13 +248,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) { - u32 ctrl, cmd, dev_rev, mask; + u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl; /* Setup PCIe controller to Root Complex mode. 
*/ ctrl = mvebu_readl(port, PCIE_CTRL_OFF); ctrl |= PCIE_CTRL_RC_MODE; mvebu_writel(port, ctrl, PCIE_CTRL_OFF); + /* + * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link + * Capability register. This register is defined by PCIe specification + * as read-only but this mvebu controller has it as read-write and must + * be set to number of SerDes PCIe lanes (1 or 4). If this register is + * not set correctly then link with endpoint card is not established. + */ + lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); + lnkcap &= ~PCI_EXP_LNKCAP_MLW; + lnkcap |= (port->is_x4 ? 4 : 1) << 4; + mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); + /* Disable Root Bridge I/O space, memory space and bus mastering. */ cmd = mvebu_readl(port, PCIE_CMD_OFF); cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); @@ -268,23 +295,71 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) */ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); dev_rev &= ~0xffffff00; - dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF); /* Point PCIe unit MBUS decode windows to DRAM space. */ mvebu_pcie_setup_wins(port); - /* Enable interrupt lines A-D. */ - mask = mvebu_readl(port, PCIE_MASK_OFF); - mask |= PCIE_MASK_ENABLE_INTS; - mvebu_writel(port, mask, PCIE_MASK_OFF); + /* + * Program Root Port to automatically send Set_Slot_Power_Limit + * PCIe Message when changing status from Dl_Down to Dl_Up and valid + * slot power limit was specified. + */ + sspl = mvebu_readl(port, PCIE_SSPL_OFF); + sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE); + if (port->slot_power_limit_value) { + sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT; + sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT; + sspl |= PCIE_SSPL_ENABLE; + } + mvebu_writel(port, sspl, PCIE_SSPL_OFF); + + /* Mask all interrupt sources. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); + + /* Clear all interrupt causes. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); + + /* Check if "intx" interrupt was specified in DT. */ + if (port->intx_irq > 0) + return; + + /* + * Fallback code when "intx" interrupt was not specified in DT: + * Unmask all legacy INTx interrupts as driver does not provide a way + * for masking and unmasking of individual legacy INTx interrupts. + * Legacy INTx are reported via one shared GIC source and therefore + * kernel cannot distinguish which individual legacy INTx was triggered. + * These interrupts are shared, so it should not cause any issue. Just + * performance penalty as every PCIe interrupt handler needs to be + * called when some interrupt is triggered. 
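The Set_Slot_Power_Limit programming above splits the limit into an 8-bit value and a 2-bit scale, where scale n means a multiplier of 10^-n watts (so scale 3 counts in milliwatts). A rough standalone sketch of that encoding follows (hypothetical helper, not the kernel's of_pci_get_slot_power_limit(), which this patch calls later during port parsing; values at or above 0xf0 with scale 0 carry special meanings in the PCIe spec and are glossed over here):

#include <stdint.h>

static int encode_slot_power_limit(uint32_t milliwatt,
                                   uint8_t *value, uint8_t *scale)
{
        uint32_t divisor = 1;   /* scale 3: units of 0.001 W, i.e. 1 mW */
        uint8_t s;

        for (s = 3; ; s--, divisor *= 10) {
                if (milliwatt / divisor <= 0xff) {
                        *value = milliwatt / divisor;
                        *scale = s;
                        return 0;
                }
                if (s == 0)
                        return -1;      /* above 255 W, not encodable here */
        }
}

For example, 25000 mW encodes as value 250 with scale 1 (250 * 0.1 W = 25 W).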
+ */ + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) | + PCIE_INT_INTX(2) | PCIE_INT_INTX(3); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); } -static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) +static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, + struct pci_bus *bus, + int devfn); + +static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) { - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + struct mvebu_pcie *pcie = bus->sysdata; + struct mvebu_pcie_port *port; + void __iomem *conf_data; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!mvebu_pcie_link_up(port)) + return PCIBIOS_DEVICE_NOT_FOUND; + + conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); @@ -300,18 +375,27 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, *val = readl_relaxed(conf_data); break; default: - *val = 0xffffffff; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } -static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) +static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) { - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + struct mvebu_pcie *pcie = bus->sysdata; + struct mvebu_pcie_port *port; + void __iomem *conf_data; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!mvebu_pcie_link_up(port)) + return PCIBIOS_DEVICE_NOT_FOUND; + + conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); @@ -333,6 +417,11 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, return PCIBIOS_SUCCESSFUL; } +static struct pci_ops mvebu_pcie_child_ops = { + .read = mvebu_pcie_child_rd_conf, + .write = mvebu_pcie_child_wr_conf, +}; + /* * Remove windows, starting from the largest ones to the smallest * ones. @@ -434,16 +523,10 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) /* Are the new iobase/iolimit values invalid? */ if (conf->iolimit < conf->iobase || - conf->iolimitupper < conf->iobaseupper) + le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper)) return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); - if (!mvebu_has_ioport(port)) { - dev_WARN(&port->pcie->pdev->dev, - "Attempt to set IO when IO is disabled\n"); - return -EOPNOTSUPP; - } - /* * We read the PCI-to-PCI bridge emulated registers, and * calculate the base address and size of the address decoding @@ -452,10 +535,10 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) * is the CPU address. */ desired.remap = ((conf->iobase & 0xF0) << 8) | - (conf->iobaseupper << 16); + (le16_to_cpu(conf->iobaseupper) << 16); desired.base = port->pcie->io.start + desired.remap; desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) | - (conf->iolimitupper << 16)) - + (le16_to_cpu(conf->iolimitupper) << 16)) - desired.remap) + 1; @@ -469,7 +552,7 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new membase/memlimit values invalid? 
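The iobase/iolimit arithmetic above reconstructs the bridge's 32-bit I/O window from its 8-bit and 16-bit config fields: iobase[7:4] maps to address bits 15:12, iobaseupper supplies bits 31:16, and the limit is inclusive with the low 12 bits forced to 0xfff. Spelled out as a standalone sketch (illustrative only):

#include <stdint.h>

static void decode_io_window(uint8_t iobase, uint16_t iobaseupper,
                             uint8_t iolimit, uint16_t iolimitupper,
                             uint32_t *start, uint32_t *size)
{
        uint32_t base = ((uint32_t)(iobase & 0xf0) << 8) |
                        ((uint32_t)iobaseupper << 16);
        uint32_t limit = 0xfff |
                         ((uint32_t)(iolimit & 0xf0) << 8) |
                         ((uint32_t)iolimitupper << 16);

        *start = base;
        *size = limit - base + 1;
}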
*/ - if (conf->memlimit < conf->membase) + if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase)) return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); @@ -479,8 +562,8 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) * window to setup, according to the PCI-to-PCI bridge * specifications. */ - desired.base = ((conf->membase & 0xFFF0) << 16); - desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) - + desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16); + desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) - desired.base + 1; return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, @@ -552,20 +635,40 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_LNKCAP: /* - * PCIe requires the clock power management capability to be - * hard-wired to zero for downstream ports + * PCIe requires that the Clock Power Management capability bit + * is hard-wired to zero for downstream ports but HW returns 1. + * Additionally enable Data Link Layer Link Active Reporting + * Capable bit as DL_Active indication is provided too. */ - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & - ~PCI_EXP_LNKCAP_CLKPM; + *value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & + ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC; break; case PCI_EXP_LNKCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + /* DL_Active indication is provided via PCIE_STAT_OFF */ + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) | + (mvebu_pcie_link_up(port) ? + (PCI_EXP_LNKSTA_DLLLA << 16) : 0); break; - case PCI_EXP_SLTCTL: - *value = PCI_EXP_SLTSTA_PDS << 16; + case PCI_EXP_SLTCTL: { + u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl); + u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta); + u32 val = 0; + /* + * When slot power limit was not specified in DT then + * ASPL_DISABLE bit is stored only in emulated config space. + * Otherwise reflect status of PCIE_SSPL_ENABLE bit in HW. + */ + if (!port->slot_power_limit_value) + val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE; + else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE)) + val |= PCI_EXP_SLTCTL_ASPL_DISABLE; + /* This callback is 32-bit and in high bits is slot status. 
*/ + val |= slotsta << 16; + *value = val; break; + } case PCI_EXP_RTSTA: *value = mvebu_readl(port, PCIE_RC_RTSTA); @@ -590,6 +693,37 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } +static pci_bridge_emul_read_status_t +mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + case 0: + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_STATUS: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG+0: + case PCI_ERR_HEADER_LOG+4: + case PCI_ERR_HEADER_LOG+8: + case PCI_ERR_HEADER_LOG+12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_STATUS: + case PCI_ERR_ROOT_ERR_SRC: + *value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg); + break; + + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } + + return PCI_BRIDGE_EMUL_HANDLED; +} + static void mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) @@ -599,24 +733,18 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_COMMAND: - if (!mvebu_has_ioport(port)) { - conf->command = cpu_to_le16( - le16_to_cpu(conf->command) & ~PCI_COMMAND_IO); - new &= ~PCI_COMMAND_IO; - } - mvebu_writel(port, new, PCIE_CMD_OFF); break; case PCI_IO_BASE: - if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) { + if ((mask & 0xffff) && mvebu_has_ioport(port) && + mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; + conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); - if (mvebu_has_ioport(port)) - conf->iobase |= 0xf0; } break; @@ -630,14 +758,14 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, break; case PCI_IO_BASE_UPPER16: - if (mvebu_pcie_handle_iobase_change(port)) { + if (mvebu_has_ioport(port) && + mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; + conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); - if (mvebu_has_ioport(port)) - conf->iobase |= 0xf0; } break; @@ -675,16 +803,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, case PCI_EXP_LNKCTL: /* - * If we don't support CLKREQ, we must ensure that the - * CLKREQ enable bit always reads zero. Since we haven't - * had this capability, and it's dependent on board wiring, - * disable it for the time being. + * PCIe requires that the Enable Clock Power Management bit + * is hard-wired to zero for downstream ports but HW allows + * to change it. */ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN; mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); break; + case PCI_EXP_SLTCTL: + /* + * Allow to change PCIE_SSPL_ENABLE bit only when slot power + * limit was specified in DT and configured into HW. 
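The write handler that follows inverts the sense of the bit: software setting the standard PCI_EXP_SLTCTL_ASPL_DISABLE bit must clear the hardware PCIE_SSPL_ENABLE bit, and clearing it must set the enable again. Reduced to a standalone sketch (hypothetical helper, not part of the patch):

#include <stdint.h>

#define SSPL_ENABLE     (1u << 16)      /* mirrors PCIE_SSPL_ENABLE */

static uint32_t sspl_apply_sltctl(uint32_t sspl, uint16_t sltctl_new,
                                  uint16_t aspl_disable)
{
        if (sltctl_new & aspl_disable)
                sspl &= ~SSPL_ENABLE;   /* software disabled auto-SSPL */
        else
                sspl |= SSPL_ENABLE;
        return sspl;
}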
+ */ + if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) && + port->slot_power_limit_value) { + u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF); + if (new & PCI_EXP_SLTCTL_ASPL_DISABLE) + sspl &= ~PCIE_SSPL_ENABLE; + else + sspl |= PCIE_SSPL_ENABLE; + mvebu_writel(port, sspl, PCIE_SSPL_OFF); + } + break; + case PCI_EXP_RTSTA: /* * PME Status bit in Root Status Register (PCIE_RC_RTSTA) @@ -709,11 +852,45 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, } } -static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { +static void +mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + /* These are W1C registers, so clear other bits */ + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_COR_STATUS: + case PCI_ERR_ROOT_STATUS: + new &= mask; + fallthrough; + + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG+0: + case PCI_ERR_HEADER_LOG+4: + case PCI_ERR_HEADER_LOG+8: + case PCI_ERR_HEADER_LOG+12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_ERR_SRC: + mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg); + break; + + default: + break; + } +} + +static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { .read_base = mvebu_pci_bridge_emul_base_conf_read, .write_base = mvebu_pci_bridge_emul_base_conf_write, .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, + .read_ext = mvebu_pci_bridge_emul_ext_conf_read, + .write_ext = mvebu_pci_bridge_emul_ext_conf_write, }; /* @@ -722,32 +899,58 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { */ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { + unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD; struct pci_bridge_emul *bridge = &port->bridge; + u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF); + u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); + u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF); u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); - bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; - bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; - bridge->conf.class_revision = - mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; + bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff); + bridge->conf.device = cpu_to_le16(dev_id >> 16); + bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff); if (mvebu_has_ioport(port)) { /* We support 32 bits I/O addressing */ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; + } else { + bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD; } /* * Older mvebu hardware provides PCIe Capability structure only in * version 1. New hardware provides it in version 2. + * Enable slot support which is emulated. */ - bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver); + bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT); + /* + * Set Presence Detect State bit permanently as there is no support for + * unplugging PCIe card from the slot. Assume that PCIe card is always + * connected in slot. + * + * Set physical slot number to port+1 as mvebu ports are indexed from + * zero and zero value is reserved for ports within the same silicon + * as Root Port which is not mvebu case. + * + * Also set correct slot power limit. 
+ */ + bridge->pcie_conf.slotcap = cpu_to_le32( + FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) | + FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) | + FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1)); + bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS); + + bridge->subsystem_vendor_id = ssdev_id & 0xffff; + bridge->subsystem_id = ssdev_id >> 16; bridge->has_pcie = true; + bridge->pcie_start = PCIE_CAP_PCIEXP; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; - return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); + return pci_bridge_emul_init(bridge, bridge_flags); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) @@ -784,25 +987,12 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; - int ret; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return pci_bridge_emul_conf_write(&port->bridge, where, - size, val); - - if (!mvebu_pcie_link_up(port)) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, - where, size, val); - - return ret; + return pci_bridge_emul_conf_write(&port->bridge, where, size, val); } /* PCI configuration space read function */ @@ -811,25 +1001,12 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; - int ret; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return pci_bridge_emul_conf_read(&port->bridge, where, - size, val); - - if (!mvebu_pcie_link_up(port)) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, - where, size, val); - - return ret; + return pci_bridge_emul_conf_read(&port->bridge, where, size, val); } static struct pci_ops mvebu_pcie_ops = { @@ -837,6 +1014,108 @@ static struct pci_ops mvebu_pcie_ops = { .write = mvebu_pcie_wr_conf, }; +static void mvebu_pcie_intx_irq_mask(struct irq_data *d) +{ + struct mvebu_pcie_port *port = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 unmask; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask &= ~PCIE_INT_INTX(hwirq); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static void mvebu_pcie_intx_irq_unmask(struct irq_data *d) +{ + struct mvebu_pcie_port *port = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 unmask; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask |= PCIE_INT_INTX(hwirq); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static struct irq_chip intx_irq_chip = { + .name = "mvebu-INTx", + .irq_mask = mvebu_pcie_intx_irq_mask, + .irq_unmask = mvebu_pcie_intx_irq_unmask, +}; + +static int mvebu_pcie_intx_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct mvebu_pcie_port *port = h->host_data; + + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq); + 
irq_set_chip_data(virq, port); + + return 0; +} + +static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = { + .map = mvebu_pcie_intx_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port) +{ + struct device *dev = &port->pcie->pdev->dev; + struct device_node *pcie_intc_node; + + raw_spin_lock_init(&port->irq_lock); + + pcie_intc_node = of_get_next_child(port->dn, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found for %s\n", port->name); + return -ENODEV; + } + + port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &mvebu_pcie_intx_irq_domain_ops, + port); + of_node_put(pcie_intc_node); + if (!port->intx_irq_domain) { + dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name); + return -ENOMEM; + } + + return 0; +} + +static void mvebu_pcie_irq_handler(struct irq_desc *desc) +{ + struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct device *dev = &port->pcie->pdev->dev; + u32 cause, unmask, status; + int i; + + chained_irq_enter(chip, desc); + + cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + status = cause & unmask; + + /* Process legacy INTx interrupts */ + for (i = 0; i < PCI_NUM_INTX; i++) { + if (!(status & PCIE_INT_INTX(i))) + continue; + + if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL) + dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A'); + } + + chained_irq_exit(chip, desc); +} + static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* Interrupt support on mvebu emulated bridges is not implemented yet */ @@ -938,7 +1217,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, return -ENOENT; } -#ifdef CONFIG_PM_SLEEP static int mvebu_pcie_suspend(struct device *dev) { struct mvebu_pcie *pcie; @@ -971,7 +1249,6 @@ static int mvebu_pcie_resume(struct device *dev) return 0; } -#endif static void mvebu_pcie_port_clk_put(void *data) { @@ -985,7 +1262,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, { struct device *dev = &pcie->pdev->dev; enum of_gpio_flags flags; + u32 slot_power_limit; int reset_gpio, ret; + u32 num_lanes; port->pcie = pcie; @@ -998,6 +1277,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) port->lane = 0; + if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4) + port->is_x4 = true; + port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, port->lane); if (!port->name) { @@ -1030,6 +1312,21 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->io_attr = -1; } + /* + * Old DT bindings do not contain "intx" interrupt + * so do not fail probing driver when interrupt does not exist. 
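The chained handler above forwards only interrupts that are both asserted in PCIE_INT_CAUSE_OFF and currently unmasked in PCIE_INT_UNMASK_OFF, dispatching one INTx domain IRQ per set bit. Its core loop, as a standalone sketch (hypothetical callback type, not part of the patch):

#include <stdint.h>

static void dispatch_intx(uint32_t cause, uint32_t unmask,
                          void (*handle)(int hwirq))
{
        uint32_t status = cause & unmask;
        int i;

        for (i = 0; i < 4; i++)                 /* PCI_NUM_INTX */
                if (status & (1u << (24 + i)))  /* PCIE_INT_INTX(i) */
                        handle(i);
}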
+ */ + port->intx_irq = of_irq_get_byname(child, "intx"); + if (port->intx_irq == -EPROBE_DEFER) { + ret = port->intx_irq; + goto err; + } + if (port->intx_irq <= 0) { + dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, " + "%pOF does not contain intx interrupt\n", + port->name, child); + } + reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); if (reset_gpio == -EPROBE_DEFER) { ret = reset_gpio; @@ -1066,6 +1363,15 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->reset_gpio = gpio_to_desc(reset_gpio); } + slot_power_limit = of_pci_get_slot_power_limit(child, + &port->slot_power_limit_value, + &port->slot_power_limit_scale); + if (slot_power_limit) + dev_info(dev, "%s: Slot power limit %u.%uW\n", + port->name, + slot_power_limit / 1000, + (slot_power_limit / 100) % 10); + port->clk = of_clk_get_by_name(child, NULL); if (IS_ERR(port->clk)) { dev_err(dev, "%s: cannot get clock\n", port->name); @@ -1226,6 +1532,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; + int irq = port->intx_irq; child = port->dn; if (!child) @@ -1253,6 +1560,22 @@ static int mvebu_pcie_probe(struct platform_device *pdev) continue; } + if (irq > 0) { + ret = mvebu_pcie_init_irq_domain(port); + if (ret) { + dev_err(dev, "%s: cannot init irq domain\n", + port->name); + pci_bridge_emul_cleanup(&port->bridge); + devm_iounmap(dev, port->base); + port->base = NULL; + mvebu_pcie_powerdown(port); + continue; + } + irq_set_chained_handler_and_data(irq, + mvebu_pcie_irq_handler, + port); + } + /* * PCIe topology exported by mvebu hw is quite complicated. In * reality has something like N fully independent host bridges @@ -1329,13 +1652,13 @@ static int mvebu_pcie_probe(struct platform_device *pdev) * indirectly via kernel emulated PCI bridge driver. */ mvebu_pcie_setup_hw(port); - mvebu_pcie_set_local_dev_nr(port, 0); + mvebu_pcie_set_local_dev_nr(port, 1); + mvebu_pcie_set_local_bus_nr(port, 0); } - pcie->nports = i; - bridge->sysdata = pcie; bridge->ops = &mvebu_pcie_ops; + bridge->child_ops = &mvebu_pcie_child_ops; bridge->align_resource = mvebu_pcie_align_resource; bridge->map_irq = mvebu_pcie_map_irq; @@ -1346,7 +1669,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev) { struct mvebu_pcie *pcie = platform_get_drvdata(pdev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - u32 cmd; + u32 cmd, sspl; int i; /* Remove PCI bus with all devices. */ @@ -1357,6 +1680,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev) for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; + int irq = port->intx_irq; if (!port->base) continue; @@ -1367,11 +1691,26 @@ static int mvebu_pcie_remove(struct platform_device *pdev) mvebu_writel(port, cmd, PCIE_CMD_OFF); /* Mask all interrupt sources. */ - mvebu_writel(port, 0, PCIE_MASK_OFF); + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); + + /* Clear all interrupt causes. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); + + if (irq > 0) + irq_set_chained_handler_and_data(irq, NULL, NULL); + + /* Remove IRQ domains. */ + if (port->intx_irq_domain) + irq_domain_remove(port->intx_irq_domain); /* Free config space for emulated root bridge. */ pci_bridge_emul_cleanup(&port->bridge); + /* Disable sending Set_Slot_Power_Limit PCIe Message. 
*/ + sspl = mvebu_readl(port, PCIE_SSPL_OFF); + sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE); + mvebu_writel(port, sspl, PCIE_SSPL_OFF); + /* Disable and clear BARs and windows. */ mvebu_pcie_disable_wins(port); @@ -1397,7 +1736,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = { }; static const struct dev_pm_ops mvebu_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) + NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) }; static struct platform_driver mvebu_pcie_driver = { diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index 35804ea394fd..839695791757 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -328,6 +328,7 @@ static const struct of_device_id rcar_pci_of_match[] = { { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, { .compatible = "renesas,pci-rcar-gen2", }, + { .compatible = "renesas,pci-rzn1", }, { }, }; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index cb0aa65d6934..24478ae5a345 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -415,13 +415,6 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) * address (access to which generates correct config transaction) falls in * this 4 KiB region. */ -static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, - unsigned int where) -{ - return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | - (PCI_FUNC(devfn) << 8) | (where & 0xff); -} - static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) @@ -443,7 +436,9 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int offset; u32 base; - offset = tegra_pcie_conf_offset(bus->number, devfn, where); + offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where) & + ~PCI_CONF1_ENABLE; /* move 4 KiB window to offset within the FPCI region */ base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); @@ -726,7 +721,7 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port) /* Tegra PCIE root complex wrongly reports device class */ static void tegra_pcie_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); @@ -2707,7 +2702,7 @@ static int tegra_pcie_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) +static int tegra_pcie_pm_suspend(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); struct tegra_pcie_port *port; @@ -2742,7 +2737,7 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) return 0; } -static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) +static int tegra_pcie_pm_resume(struct device *dev) { struct tegra_pcie *pcie = dev_get_drvdata(dev); int err; @@ -2798,9 +2793,8 @@ poweroff: } static const struct dev_pm_ops tegra_pcie_pm_ops = { - SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, - tegra_pcie_pm_resume) + RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) + NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, 
tegra_pcie_pm_resume) }; static struct platform_driver tegra_pcie_driver = { diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index 653d5d0ecf81..7991d334e0f1 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c @@ -31,10 +31,9 @@ static u32 pci_slot_ignore; static int __init versatile_pci_slot_ignore(char *str) { - int retval; int slot; - while ((retval = get_option(&str, &slot))) { + while (get_option(&str, &slot)) { if ((slot < 0) || (slot > 31)) pr_err("Illegal slot value: %d\n", slot); else diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 0d5acbfc7143..549d3bd6d1c2 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -49,7 +49,6 @@ #define EN_REG 0x00000001 #define OB_LO_IO 0x00000002 #define XGENE_PCIE_DEVICEID 0xE004 -#define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) #define XGENE_V1_PCI_EXP_CAP 0x40 @@ -465,7 +464,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } @@ -479,28 +478,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) } static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, - struct resource_entry *entry, - u8 *ib_reg_mask) + struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void __iomem *bar_addr; u32 pim_reg; - u64 cpu_addr = entry->res->start; - u64 pci_addr = cpu_addr - entry->offset; - u64 size = resource_size(entry->res); + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; - region = xgene_pcie_select_ib_reg(ib_reg_mask, size); + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } - if (entry->res->flags & IORESOURCE_PREFETCH) + if (range->flags & IORESOURCE_PREFETCH) flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); @@ -531,13 +529,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); - struct resource_entry *entry; + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; u8 ib_reg_mask = 0; - resource_list_for_each_entry(entry, &bridge->dma_ranges) - xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask); + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } return 0; } @@ -631,7 +641,7 @@ static const struct of_device_id xgene_pcie_match_table[] = { static struct platform_driver xgene_pcie_driver = { .driver = { .name = "xgene-pcie", - .of_match_table = of_match_ptr(xgene_pcie_match_table), + .of_match_table 
= xgene_pcie_match_table, .suppress_bind_attrs = true, }, .probe = xgene_pcie_probe, diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 854d95163112..66f37e403a09 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, if (hwirq < 0) return -ENOSPC; - fwspec.param[1] += hwirq; + fwspec.param[fwspec.param_count - 2] += hwirq; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); if (ret) @@ -516,8 +516,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, u32 stat, idx; int ret, i; - reset = gpiod_get_from_of_node(np, "reset-gpios", 0, - GPIOD_OUT_LOW, "PERST#"); + reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset", + GPIOD_OUT_LOW, "PERST#"); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 375c0c40bbf8..521acd632f1a 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -191,13 +191,6 @@ /* Forward declarations */ struct brcm_pcie; -static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val); -static int brcm_pcie_linkup(struct brcm_pcie *pcie); -static int brcm_pcie_add_bus(struct pci_bus *bus); enum { RGR1_SW_INIT_1, @@ -226,74 +219,11 @@ struct pcie_cfg_data { void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); }; -static const int pcie_offsets[] = { - [RGR1_SW_INIT_1] = 0x9210, - [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, -}; - -static const int pcie_offsets_bmips_7425[] = { - [RGR1_SW_INIT_1] = 0x8010, - [EXT_CFG_INDEX] = 0x8300, - [EXT_CFG_DATA] = 0x8304, -}; - -static const struct pcie_cfg_data generic_cfg = { - .offsets = pcie_offsets, - .type = GENERIC, - .perst_set = brcm_pcie_perst_set_generic, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm7425_cfg = { - .offsets = pcie_offsets_bmips_7425, - .type = BCM7425, - .perst_set = brcm_pcie_perst_set_generic, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm7435_cfg = { - .offsets = pcie_offsets, - .type = BCM7435, - .perst_set = brcm_pcie_perst_set_generic, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm4908_cfg = { - .offsets = pcie_offsets, - .type = BCM4908, - .perst_set = brcm_pcie_perst_set_4908, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const int pcie_offset_bcm7278[] = { - [RGR1_SW_INIT_1] = 0xc010, - [EXT_CFG_INDEX] = 0x9000, - [EXT_CFG_DATA] = 0x9004, -}; - -static const struct pcie_cfg_data bcm7278_cfg = { - .offsets = pcie_offset_bcm7278, - .type = BCM7278, - .perst_set = brcm_pcie_perst_set_7278, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, -}; - -static const struct pcie_cfg_data bcm2711_cfg = { - .offsets = pcie_offsets, - .type = BCM2711, - .perst_set = brcm_pcie_perst_set_generic, - .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - struct subdev_regulators { unsigned 
int num_supplies; struct regulator_bulk_data supplies[]; }; -static int pci_subdev_regulators_add_bus(struct pci_bus *bus); -static void pci_subdev_regulators_remove_bus(struct pci_bus *bus); - struct brcm_msi { struct device *dev; void __iomem *base; @@ -331,7 +261,6 @@ struct brcm_pcie { u32 hw_rev; void (*perst_set)(struct brcm_pcie *pcie, u32 val); void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); - bool refusal_mode; struct subdev_regulators *sr; bool ep_wakeup_capable; }; @@ -450,99 +379,6 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) return ssc && pll ? 0 : -EIO; } -static void *alloc_subdev_regulators(struct device *dev) -{ - static const char * const supplies[] = { - "vpcie3v3", - "vpcie3v3aux", - "vpcie12v", - }; - const size_t size = sizeof(struct subdev_regulators) - + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies); - struct subdev_regulators *sr; - int i; - - sr = devm_kzalloc(dev, size, GFP_KERNEL); - if (sr) { - sr->num_supplies = ARRAY_SIZE(supplies); - for (i = 0; i < ARRAY_SIZE(supplies); i++) - sr->supplies[i].supply = supplies[i]; - } - - return sr; -} - -static int pci_subdev_regulators_add_bus(struct pci_bus *bus) -{ - struct device *dev = &bus->dev; - struct subdev_regulators *sr; - int ret; - - if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent)) - return 0; - - if (dev->driver_data) - dev_err(dev, "dev.driver_data unexpectedly non-NULL\n"); - - sr = alloc_subdev_regulators(dev); - if (!sr) - return -ENOMEM; - - dev->driver_data = sr; - ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); - if (ret) - return ret; - - ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); - if (ret) { - dev_err(dev, "failed to enable regulators for downstream device\n"); - return ret; - } - - return 0; -} - -static int brcm_pcie_add_bus(struct pci_bus *bus) -{ - struct device *dev = &bus->dev; - struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata; - int ret; - - if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent)) - return 0; - - ret = pci_subdev_regulators_add_bus(bus); - if (ret) - return ret; - - /* Grab the regulators for suspend/resume */ - pcie->sr = bus->dev.driver_data; - - /* - * If we have failed linkup there is no point to return an error as - * currently it will cause a WARNING() from pci_alloc_child_bus(). 
- * We return 0 and turn on the "refusal_mode" so that any further - * accesses to the pci_dev just get 0xffffffff - */ - if (brcm_pcie_linkup(pcie) != 0) - pcie->refusal_mode = true; - - return 0; -} - -static void pci_subdev_regulators_remove_bus(struct pci_bus *bus) -{ - struct device *dev = &bus->dev; - struct subdev_regulators *sr = dev->driver_data; - - if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) - return; - - if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) - dev_err(dev, "failed to disable regulators for downstream device\n"); - dev->driver_data = NULL; -} - /* Limits operation to a specific generation (1, 2, or 3) */ static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { @@ -848,66 +684,48 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie) return dla && plu; } -static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn, - int where) +static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) { struct brcm_pcie *pcie = bus->sysdata; void __iomem *base = pcie->base; int idx; - /* Accesses to the RC go right to the RC registers if slot==0 */ + /* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) - return PCI_SLOT(devfn) ? NULL : base + where; - if (pcie->refusal_mode) { - /* - * At this point we do not have link. There will be a CPU - * abort -- a quirk with this controller --if Linux tries - * to read any config-space registers besides those - * targeting the host bridge. To prevent this we hijack - * the address to point to a safe access that will return - * 0xffffffff. - */ - writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI); - return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3); - } + return devfn ? NULL : base + PCIE_ECAM_REG(where); + + /* An access to our HW w/o link-up will cause a CPU Abort */ + if (!brcm_pcie_link_up(pcie)) + return NULL; /* For devices, write to the config space index register */ idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0); writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); - return base + PCIE_EXT_CFG_DATA + where; + return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where); } -static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn, - int where) +static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) { struct brcm_pcie *pcie = bus->sysdata; void __iomem *base = pcie->base; int idx; - /* Accesses to the RC go right to the RC registers if slot==0 */ + /* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) - return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3); + return devfn ? 
NULL : base + PCIE_ECAM_REG(where); + + /* An access to our HW w/o link-up will cause a CPU Abort */ + if (!brcm_pcie_link_up(pcie)) + return NULL; /* For devices, write to the config space index register */ - idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3)); + idx = PCIE_ECAM_OFFSET(bus->number, devfn, where); writel(idx, base + IDX_ADDR(pcie)); return base + DATA_ADDR(pcie); } -static struct pci_ops brcm_pcie_ops = { - .map_bus = brcm_pcie_map_conf, - .read = pci_generic_config_read, - .write = pci_generic_config_write, - .add_bus = brcm_pcie_add_bus, - .remove_bus = pci_subdev_regulators_remove_bus, -}; - -static struct pci_ops brcm_pcie_ops32 = { - .map_bus = brcm_pcie_map_conf32, - .read = pci_generic_config_read32, - .write = pci_generic_config_write32, -}; - static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) { u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK; @@ -1049,8 +867,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) { u64 rc_bar2_offset, rc_bar2_size; void __iomem *base = pcie->base; - int ret, memc; + struct pci_host_bridge *bridge; + struct resource_entry *entry; u32 tmp, burst, aspm_support; + int num_out_wins = 0; + int ret, memc; /* Reset the bridge */ pcie->bridge_sw_init_set(pcie, 1); @@ -1126,6 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) else pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB; + if (!brcm_pcie_rc_mode(pcie)) { + dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n"); + return -EINVAL; + } + /* disable the PCIe->GISB memory window (RC_BAR1) */ tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO); tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK; @@ -1136,9 +962,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK; writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); - if (pcie->gen) - brcm_pcie_set_gen(pcie, pcie->gen); - /* Don't advertise L0s capability if 'aspm-no-l0s' */ aspm_support = PCIE_LINK_STATE_L1; if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) @@ -1157,44 +980,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); - return 0; -} - -static int brcm_pcie_linkup(struct brcm_pcie *pcie) -{ - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - struct device *dev = pcie->dev; - void __iomem *base = pcie->base; - struct resource_entry *entry; - struct resource *res; - int num_out_wins = 0; - u16 nlw, cls, lnksta; - bool ssc_good = false; - u32 tmp; - int ret, i; - - /* Unassert the fundamental reset */ - pcie->perst_set(pcie, 0); - - /* - * Give the RC/EP time to wake up, before trying to configure RC. - * Intermittently check status for link-up, up to a total of 100ms. 
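Both new map_bus implementations above lean on the standard ECAM layout via the kernel's PCIE_ECAM_OFFSET()/PCIE_ECAM_REG() helpers. A minimal re-derivation of that layout (illustrative, not the kernel macros): bus in bits 27:20, device in 19:15, function in 14:12, register in 11:0.

#include <stdint.h>

static uint32_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn,
                            uint16_t reg)
{
        return ((uint32_t)bus << 20) |
               ((uint32_t)(dev & 0x1f) << 15) |
               ((uint32_t)(fn & 0x7) << 12) |
               (reg & 0xfff);
}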
- */ - for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) - msleep(5); - - if (!brcm_pcie_link_up(pcie)) { - dev_err(dev, "link down\n"); - return -ENODEV; - } - - if (!brcm_pcie_rc_mode(pcie)) { - dev_err(dev, "PCIe misconfigured; is in EP mode\n"); - return -EINVAL; - } - + bridge = pci_host_bridge_from_priv(pcie); resource_list_for_each_entry(entry, &bridge->windows) { - res = entry->res; + struct resource *res = entry->res; if (resource_type(res) != IORESOURCE_MEM) continue; @@ -1223,6 +1011,42 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie) num_out_wins++; } + /* PCIe->SCB endian mode for BAR */ + tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); + u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); + writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); + + return 0; +} + +static int brcm_pcie_start_link(struct brcm_pcie *pcie) +{ + struct device *dev = pcie->dev; + void __iomem *base = pcie->base; + u16 nlw, cls, lnksta; + bool ssc_good = false; + u32 tmp; + int ret, i; + + /* Unassert the fundamental reset */ + pcie->perst_set(pcie, 0); + + /* + * Give the RC/EP time to wake up, before trying to configure RC. + * Intermittently check status for link-up, up to a total of 100ms. + */ + for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) + msleep(5); + + if (!brcm_pcie_link_up(pcie)) { + dev_err(dev, "link down\n"); + return -ENODEV; + } + + if (pcie->gen) + brcm_pcie_set_gen(pcie, pcie->gen); + if (pcie->ssc) { ret = brcm_pcie_set_ssc(pcie); if (ret == 0) @@ -1238,12 +1062,6 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie) pci_speed_string(pcie_link_speed[cls]), nlw, ssc_good ? "(SSC)" : "(!SSC)"); - /* PCIe->SCB endian mode for BAR */ - tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); - u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, - PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); - writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); - /* * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. 
@@ -1255,6 +1073,82 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie) return 0; } +static const char * const supplies[] = { + "vpcie3v3", + "vpcie3v3aux", + "vpcie12v", +}; + +static void *alloc_subdev_regulators(struct device *dev) +{ + const size_t size = sizeof(struct subdev_regulators) + + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies); + struct subdev_regulators *sr; + int i; + + sr = devm_kzalloc(dev, size, GFP_KERNEL); + if (sr) { + sr->num_supplies = ARRAY_SIZE(supplies); + for (i = 0; i < ARRAY_SIZE(supplies); i++) + sr->supplies[i].supply = supplies[i]; + } + + return sr; +} + +static int brcm_pcie_add_bus(struct pci_bus *bus) +{ + struct brcm_pcie *pcie = bus->sysdata; + struct device *dev = &bus->dev; + struct subdev_regulators *sr; + int ret; + + if (!bus->parent || !pci_is_root_bus(bus->parent)) + return 0; + + if (dev->of_node) { + sr = alloc_subdev_regulators(dev); + if (!sr) { + dev_info(dev, "Can't allocate regulators for downstream device\n"); + goto no_regulators; + } + + pcie->sr = sr; + + ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); + if (ret) { + dev_info(dev, "No regulators for downstream device\n"); + goto no_regulators; + } + + ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); + if (ret) { + dev_err(dev, "Can't enable regulators for downstream device\n"); + regulator_bulk_free(sr->num_supplies, sr->supplies); + pcie->sr = NULL; + } + } + +no_regulators: + brcm_pcie_start_link(pcie); + return 0; +} + +static void brcm_pcie_remove_bus(struct pci_bus *bus) +{ + struct brcm_pcie *pcie = bus->sysdata; + struct subdev_regulators *sr = pcie->sr; + struct device *dev = &bus->dev; + + if (!sr) + return; + + if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) + dev_err(dev, "Failed to disable regulators for downstream device\n"); + regulator_bulk_free(sr->num_supplies, sr->supplies); + pcie->sr = NULL; +} + /* L23 is a low-power PCIe link state */ static void brcm_pcie_enter_l23(struct brcm_pcie *pcie) { @@ -1357,12 +1251,12 @@ static int pci_dev_may_wakeup(struct pci_dev *dev, void *data) if (device_may_wakeup(&dev->dev)) { *ret = true; - dev_info(&dev->dev, "disable cancelled for wake-up device\n"); + dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n"); } return (int) *ret; } -static int brcm_pcie_suspend(struct device *dev) +static int brcm_pcie_suspend_noirq(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); @@ -1407,7 +1301,7 @@ static int brcm_pcie_suspend(struct device *dev) return 0; } -static int brcm_pcie_resume(struct device *dev) +static int brcm_pcie_resume_noirq(struct device *dev) { struct brcm_pcie *pcie = dev_get_drvdata(dev); void __iomem *base; @@ -1419,28 +1313,9 @@ static int brcm_pcie_resume(struct device *dev) if (ret) return ret; - if (pcie->sr) { - if (pcie->ep_wakeup_capable) { - /* - * We are resuming from a suspend. In the suspend we - * did not disable the power supplies, so there is - * no need to enable them (and falsely increase their - * usage count). 
- */ - pcie->ep_wakeup_capable = false; - } else { - ret = regulator_bulk_enable(pcie->sr->num_supplies, - pcie->sr->supplies); - if (ret) { - dev_err(dev, "Could not turn on regulators\n"); - goto err_disable_clk; - } - } - } - ret = reset_control_reset(pcie->rescal); if (ret) - goto err_regulator; + goto err_disable_clk; ret = brcm_phy_start(pcie); if (ret) @@ -1461,20 +1336,39 @@ static int brcm_pcie_resume(struct device *dev) if (ret) goto err_reset; - ret = brcm_pcie_linkup(pcie); + if (pcie->sr) { + if (pcie->ep_wakeup_capable) { + /* + * We are resuming from a suspend. In the suspend we + * did not disable the power supplies, so there is + * no need to enable them (and falsely increase their + * usage count). + */ + pcie->ep_wakeup_capable = false; + } else { + ret = regulator_bulk_enable(pcie->sr->num_supplies, + pcie->sr->supplies); + if (ret) { + dev_err(dev, "Could not turn on regulators\n"); + goto err_reset; + } + } + } + + ret = brcm_pcie_start_link(pcie); if (ret) - goto err_reset; + goto err_regulator; if (pcie->msi) brcm_msi_set_regs(pcie->msi); return 0; -err_reset: - reset_control_rearm(pcie->rescal); err_regulator: if (pcie->sr) regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies); +err_reset: + reset_control_rearm(pcie->rescal); err_disable_clk: clk_disable_unprepare(pcie->clk); return ret; @@ -1503,6 +1397,66 @@ static int brcm_pcie_remove(struct platform_device *pdev) return 0; } +static const int pcie_offsets[] = { + [RGR1_SW_INIT_1] = 0x9210, + [EXT_CFG_INDEX] = 0x9000, + [EXT_CFG_DATA] = 0x9004, +}; + +static const int pcie_offsets_bmips_7425[] = { + [RGR1_SW_INIT_1] = 0x8010, + [EXT_CFG_INDEX] = 0x8300, + [EXT_CFG_DATA] = 0x8304, +}; + +static const struct pcie_cfg_data generic_cfg = { + .offsets = pcie_offsets, + .type = GENERIC, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm7425_cfg = { + .offsets = pcie_offsets_bmips_7425, + .type = BCM7425, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm7435_cfg = { + .offsets = pcie_offsets, + .type = BCM7435, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm4908_cfg = { + .offsets = pcie_offsets, + .type = BCM4908, + .perst_set = brcm_pcie_perst_set_4908, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const int pcie_offset_bcm7278[] = { + [RGR1_SW_INIT_1] = 0xc010, + [EXT_CFG_INDEX] = 0x9000, + [EXT_CFG_DATA] = 0x9004, +}; + +static const struct pcie_cfg_data bcm7278_cfg = { + .offsets = pcie_offset_bcm7278, + .type = BCM7278, + .perst_set = brcm_pcie_perst_set_7278, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, +}; + +static const struct pcie_cfg_data bcm2711_cfg = { + .offsets = pcie_offsets, + .type = BCM2711, + .perst_set = brcm_pcie_perst_set_generic, + .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + static const struct of_device_id brcm_pcie_match[] = { { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, { .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg }, @@ -1515,6 +1469,22 @@ static const struct of_device_id brcm_pcie_match[] = { {}, }; +static struct pci_ops brcm_pcie_ops = { + .map_bus = brcm_pcie_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + .add_bus = brcm_pcie_add_bus, + .remove_bus 
= brcm_pcie_remove_bus, +}; + +static struct pci_ops brcm7425_pcie_ops = { + .map_bus = brcm7425_pcie_map_bus, + .read = pci_generic_config_read32, + .write = pci_generic_config_write32, + .add_bus = brcm_pcie_add_bus, + .remove_bus = brcm_pcie_remove_bus, +}; + static int brcm_pcie_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node, *msi_np; @@ -1601,7 +1571,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) } } - bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops; + bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; bridge->sysdata = pcie; platform_set_drvdata(pdev, pcie); @@ -1625,8 +1595,8 @@ fail: MODULE_DEVICE_TABLE(of, brcm_pcie_match); static const struct dev_pm_ops brcm_pcie_pm_ops = { - .suspend_noirq = brcm_pcie_suspend, - .resume_noirq = brcm_pcie_resume, + .suspend_noirq = brcm_pcie_suspend_noirq, + .resume_noirq = brcm_pcie_resume_noirq, }; static struct platform_driver brcm_pcie_driver = { diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c index 54b6e6d5bc64..99a99900444d 100644 --- a/drivers/pci/controller/pcie-iproc-bcma.c +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -18,7 +18,7 @@ /* NS: CLASS field is R/O, and set to wrong 0x200 value */ static void bcma_pcie2_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 757b7fbcdc59..fee036b07cd4 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -589,8 +589,8 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) msi->has_inten_reg = true; msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; - msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), - sizeof(*msi->bitmap), GFP_KERNEL); + msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs, + GFP_KERNEL); if (!msi->bitmap) return -ENOMEM; diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index b3e75bc61ff1..2519201b0e51 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -789,14 +789,13 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie) return -EFAULT; } - /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ + /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */ #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c -#define PCI_CLASS_BRIDGE_MASK 0xffff00 -#define PCI_CLASS_BRIDGE_SHIFT 8 +#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, &class); - class &= ~PCI_CLASS_BRIDGE_MASK; - class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); + class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; + class |= PCI_CLASS_BRIDGE_PCI_NORMAL; iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, class); @@ -1581,7 +1580,7 @@ static void quirk_paxc_bridge(struct pci_dev *pdev) * code that the bridge is not an Ethernet device. */ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - pdev->class = PCI_CLASS_BRIDGE_PCI << 8; + pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; /* * MPSS is not being set properly (as it is currently 0). 
This is diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 7705d61fba4c..b8612ce5f4d0 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -153,6 +153,37 @@ struct mtk_gen3_pcie { DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM); }; +/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */ +static const char *const ltssm_str[] = { + "detect.quiet", /* 0x00 */ + "detect.active", /* 0x01 */ + "polling.active", /* 0x02 */ + "polling.compliance", /* 0x03 */ + "polling.configuration", /* 0x04 */ + "config.linkwidthstart", /* 0x05 */ + "config.linkwidthaccept", /* 0x06 */ + "config.lanenumwait", /* 0x07 */ + "config.lanenumaccept", /* 0x08 */ + "config.complete", /* 0x09 */ + "config.idle", /* 0x0A */ + "recovery.receiverlock", /* 0x0B */ + "recovery.equalization", /* 0x0C */ + "recovery.speed", /* 0x0D */ + "recovery.receiverconfig", /* 0x0E */ + "recovery.idle", /* 0x0F */ + "L0", /* 0x10 */ + "L0s", /* 0x11 */ + "L1.entry", /* 0x12 */ + "L1.idle", /* 0x13 */ + "L2.idle", /* 0x14 */ + "L2.transmitwake", /* 0x15 */ + "disable", /* 0x16 */ + "loopback.entry", /* 0x17 */ + "loopback.active", /* 0x18 */ + "loopback.exit", /* 0x19 */ + "hotreset", /* 0x1A */ +}; + /** * mtk_pcie_config_tlp_header() - Configure a configuration TLP header * @bus: PCI bus to query @@ -292,7 +323,7 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) /* Set class code */ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); - val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); + val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL); writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); /* Mask all INTx interrupts */ @@ -327,8 +358,16 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) !!(val & PCIE_PORT_LINKUP), 20, PCI_PM_D3COLD_WAIT * USEC_PER_MSEC); if (err) { + const char *ltssm_state; + int ltssm_index; + val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); - dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val); + ltssm_index = PCIE_LTSSM_STATE(val); + ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ? + "Unknown state" : ltssm_str[ltssm_index]; + dev_err(pcie->dev, + "PCIe link down, current LTSSM state: %s (%#x)\n", + ltssm_state, val); return err; } @@ -600,7 +639,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "failed to create INTx IRQ domain\n"); - return -ENODEV; + ret = -ENODEV; + goto out_put_node; } /* Setup MSI */ @@ -623,13 +663,15 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) goto err_msi_domain; } + of_node_put(intc_node); return 0; err_msi_domain: irq_domain_remove(pcie->msi_bottom_domain); err_msi_bottom_domain: irq_domain_remove(pcie->intx_domain); - +out_put_node: + of_node_put(intc_node); return ret; } @@ -838,6 +880,14 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie) if (err) return err; + /* + * The controller may have been left out of reset by the bootloader + * so make sure that we get a clean start by asserting resets here. 
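The pcie-mediatek-gen3 hunk above replaces the raw LTSSM register dump with a name lookup so a link-down failure reports a readable state. A minimal sketch of the bounds-checked decode, assuming the state field sits in bits [28:24] as the table comment says; LTSSM_STATE_MASK and ltssm_name() are illustrative stand-ins, not the driver's own PCIE_LTSSM_STATE() helper:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define LTSSM_STATE_MASK	GENMASK(28, 24)

static const char *ltssm_name(u32 status, const char *const *names,
			      size_t n_names)
{
	u32 idx = FIELD_GET(LTSSM_STATE_MASK, status);

	/* states past the end of the table print as unknown */
	return idx < n_names ? names[idx] : "Unknown state";
}

With the table above this would be called as ltssm_name(val, ltssm_str, ARRAY_SIZE(ltssm_str)), so new states can be appended to the table without touching the lookup.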
+ */ + reset_control_assert(pcie->phy_reset); + reset_control_assert(pcie->mac_reset); + usleep_range(10, 20); + /* Don't touch the hardware registers before power up */ err = mtk_pcie_power_up(pcie); if (err) @@ -909,7 +959,7 @@ static int mtk_pcie_remove(struct platform_device *pdev) return 0; } -static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) +static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) { int i; @@ -927,7 +977,7 @@ static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) raw_spin_unlock(&pcie->irq_lock); } -static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) +static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) { int i; @@ -945,7 +995,7 @@ static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) raw_spin_unlock(&pcie->irq_lock); } -static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) +static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) { u32 val; @@ -960,7 +1010,7 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) 50 * USEC_PER_MSEC); } -static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +static int mtk_pcie_suspend_noirq(struct device *dev) { struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; @@ -986,7 +1036,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) return 0; } -static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +static int mtk_pcie_resume_noirq(struct device *dev) { struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); int err; @@ -1007,8 +1057,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) } static const struct dev_pm_ops mtk_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, - mtk_pcie_resume_noirq) + NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, + mtk_pcie_resume_noirq) }; static const struct of_device_id mtk_pcie_of_match[] = { @@ -1021,7 +1071,7 @@ static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, .remove = mtk_pcie_remove, .driver = { - .name = "mtk-pcie", + .name = "mtk-pcie-gen3", .of_match_table = mtk_pcie_of_match, .pm = &mtk_pcie_pm_ops, }, diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index ddfbd4aebdec..ae5ad05ddc1d 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1008,6 +1008,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) "mediatek,generic-pciecfg"); if (cfg_node) { pcie->cfg = syscon_node_to_regmap(cfg_node); + of_node_put(cfg_node); if (IS_ERR(pcie->cfg)) return PTR_ERR(pcie->cfg); } @@ -1149,7 +1150,7 @@ static int mtk_pcie_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +static int mtk_pcie_suspend_noirq(struct device *dev) { struct mtk_pcie *pcie = dev_get_drvdata(dev); struct mtk_pcie_port *port; @@ -1173,7 +1174,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) return 0; } -static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +static int mtk_pcie_resume_noirq(struct device *dev) { struct mtk_pcie *pcie = dev_get_drvdata(dev); struct mtk_pcie_port *port, *tmp; @@ -1194,8 +1195,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) } static const struct dev_pm_ops mtk_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, - mtk_pcie_resume_noirq) + NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, + 
mtk_pcie_resume_noirq) }; static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c index 29d8e81e4181..7263d175b5ad 100644 --- a/drivers/pci/controller/pcie-microchip-host.c +++ b/drivers/pci/controller/pcie-microchip-host.c @@ -406,6 +406,7 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base) static void mc_handle_msi(struct irq_desc *desc) { struct mc_pcie *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); struct device *dev = port->dev; struct mc_msi *msi = &port->msi; void __iomem *bridge_base_addr = @@ -414,8 +415,11 @@ static void mc_handle_msi(struct irq_desc *desc) u32 bit; int ret; + chained_irq_enter(chip, desc); + status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); if (status & PM_MSI_INT_MSI_MASK) { + writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL); status = readl_relaxed(bridge_base_addr + ISTATUS_MSI); for_each_set_bit(bit, &status, msi->num_vectors) { ret = generic_handle_domain_irq(msi->dev_domain, bit); @@ -424,6 +428,8 @@ static void mc_handle_msi(struct irq_desc *desc) bit); } } + + chained_irq_exit(chip, desc); } static void mc_msi_bottom_irq_ack(struct irq_data *data) @@ -432,13 +438,8 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data) void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; u32 bitpos = data->hwirq; - unsigned long status; writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI); - status = readl_relaxed(bridge_base_addr + ISTATUS_MSI); - if (!status) - writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT), - bridge_base_addr + ISTATUS_LOCAL); } static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -563,6 +564,7 @@ static int mc_allocate_msi_domains(struct mc_pcie *port) static void mc_handle_intx(struct irq_desc *desc) { struct mc_pcie *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); struct device *dev = port->dev; void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR; @@ -570,6 +572,8 @@ static void mc_handle_intx(struct irq_desc *desc) u32 bit; int ret; + chained_irq_enter(chip, desc); + status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL); if (status & PM_MSI_INT_INTX_MASK) { status &= PM_MSI_INT_INTX_MASK; @@ -581,6 +585,8 @@ static void mc_handle_intx(struct irq_desc *desc) bit); } } + + chained_irq_exit(chip, desc); } static void mc_ack_intx_irq(struct irq_data *data) @@ -898,6 +904,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port) &event_domain_ops, port); if (!port->event_domain) { dev_err(dev, "failed to get event domain\n"); + of_node_put(pcie_intc_node); return -ENOMEM; } @@ -907,6 +914,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port) &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "failed to get an INTx IRQ domain\n"); + of_node_put(pcie_intc_node); return -ENOMEM; } @@ -1115,7 +1123,7 @@ static const struct of_device_id mc_pcie_of_match[] = { {}, }; -MODULE_DEVICE_TABLE(of, mc_pcie_of_match) +MODULE_DEVICE_TABLE(of, mc_pcie_of_match); static struct platform_driver mc_pcie_driver = { .probe = pci_host_common_probe, diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 33eb37a2225c..4bd1abf26008 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -30,6 +30,8 @@ #include <linux/reset.h> #include 
<linux/sys_soc.h> +#include "../pci.h" + /* MediaTek-specific configuration registers */ #define PCIE_FTS_NUM 0x70c #define PCIE_FTS_NUM_MASK GENMASK(15, 8) @@ -120,19 +122,12 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port, writel_relaxed(val, port->base + reg); } -static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot, - unsigned int func, unsigned int where) -{ - return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) | - (func << 8) | (where & 0xfc) | 0x80000000; -} - static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct mt7621_pcie *pcie = bus->sysdata; - u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where); + u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), where); writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR); @@ -147,7 +142,7 @@ static struct pci_ops mt7621_pcie_ops = { static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) { - u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); + u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); return pcie_read(pcie, RALINK_PCI_CONFIG_DATA); @@ -156,7 +151,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg) static void write_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg, u32 val) { - u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg); + u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg); pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR); pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 38b6e02edfa9..e4faf90feaf5 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -65,6 +65,42 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); }; +static DEFINE_SPINLOCK(pmsr_lock); + +static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) +{ + unsigned long flags; + u32 pmsr, val; + int ret = 0; + + spin_lock_irqsave(&pmsr_lock, flags); + + if (!pcie_base || pm_runtime_suspended(pcie_dev)) { + ret = -EINVAL; + goto unlock_exit; + } + + pmsr = readl(pcie_base + PMSR); + + /* + * Test if the PCIe controller received PM_ENTER_L1 DLLP and + * the PCIe controller is not in L1 link state. If true, apply + * fix, which will put the controller into L1 link state, from + * which it can return to L0s/L0 on its own. 
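The mt7621 hunks drop the driver's private config-address helper in favor of the shared PCI_CONF1_EXT_ADDRESS() macro from drivers/pci/pci.h. Written out as a function, the "mechanism 1" encoding both produce looks like the sketch below; conf1_ext_address() itself is illustrative only:

#include <linux/bits.h>
#include <linux/types.h>

static u32 conf1_ext_address(unsigned int bus, unsigned int slot,
			     unsigned int func, unsigned int where)
{
	return BIT(31) |			/* config-cycle enable */
	       ((where & 0xf00) << 16) |	/* extended register, bits [27:24] */
	       (bus << 16) | (slot << 11) |
	       (func << 8) | (where & 0xfc);	/* dword-aligned register */
}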
+ */ + if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) { + writel(L1IATN, pcie_base + PMCTLR); + ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, + val & L1FAEG, 10, 1000); + WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); + writel(L1FAEG | PMEL1RX, pcie_base + PMSR); + } + +unlock_exit: + spin_unlock_irqrestore(&pmsr_lock, flags); + return ret; +} + static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi) { return container_of(msi, struct rcar_pcie_host, msi); @@ -78,6 +114,54 @@ static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) return val >> shift; } +#ifdef CONFIG_ARM +#define __rcar_pci_rw_reg_workaround(instr) \ + " .arch armv7-a\n" \ + "1: " instr " %1, [%2]\n" \ + "2: isb\n" \ + "3: .pushsection .text.fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: mov %0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \ + " b 3b\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .popsection\n" +#endif + +static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val, + unsigned int reg) +{ + int error = PCIBIOS_SUCCESSFUL; +#ifdef CONFIG_ARM + asm volatile( + __rcar_pci_rw_reg_workaround("str") + : "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory"); +#else + rcar_pci_write_reg(pcie, val, reg); +#endif + return error; +} + +static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val, + unsigned int reg) +{ + int error = PCIBIOS_SUCCESSFUL; +#ifdef CONFIG_ARM + asm volatile( + __rcar_pci_rw_reg_workaround("ldr") + : "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory"); + + if (error != PCIBIOS_SUCCESSFUL) + PCI_SET_ERROR_RESPONSE(val); +#else + *val = rcar_pci_read_reg(pcie, reg); +#endif + return error; +} + /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static int rcar_pcie_config_access(struct rcar_pcie_host *host, unsigned char access_type, struct pci_bus *bus, @@ -85,6 +169,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, { struct rcar_pcie *pcie = &host->pcie; unsigned int dev, func, reg, index; + int ret; + + /* Wake the bus up in case it is in L1 state. */ + ret = rcar_pcie_wakeup(pcie->dev, pcie->base); + if (ret) { + PCI_SET_ERROR_RESPONSE(data); + return PCIBIOS_SET_FAILED; + } dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); @@ -141,14 +233,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) - *data = rcar_pci_read_reg(pcie, PCIECDR); + ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR); else - rcar_pci_write_reg(pcie, *data, PCIECDR); + ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR); /* Disable the configuration access */ rcar_pci_write_reg(pcie, 0, PCIECCTLR); - return PCIBIOS_SUCCESSFUL; + return ret; } static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, @@ -370,7 +462,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. 
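rcar_pcie_wakeup() above polls PMSR while holding pmsr_lock, so it uses readl_poll_timeout_atomic(), which busy-waits with udelay() instead of sleeping. The general shape of that helper, with placeholder names (REG_DONE and my_base are not R-Car register names):

#include <linux/bits.h>
#include <linux/iopoll.h>

#define REG_DONE	BIT(0)	/* placeholder status bit */

static int wait_for_done(void __iomem *my_base)
{
	u32 val;

	/* sample every 10 us, fail with -ETIMEDOUT after 1000 us */
	return readl_poll_timeout_atomic(my_base, val, val & REG_DONE,
					 10, 1000);
}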
*/ - rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1); /* * Setup Secondary Bus Number & Subordinate Bus Number, even though @@ -980,7 +1072,7 @@ err_pm_put: return err; } -static int __maybe_unused rcar_pcie_resume(struct device *dev) +static int rcar_pcie_resume(struct device *dev) { struct rcar_pcie_host *host = dev_get_drvdata(dev); struct rcar_pcie *pcie = &host->pcie; @@ -1035,7 +1127,7 @@ static int rcar_pcie_resume_noirq(struct device *dev) } static const struct dev_pm_ops rcar_pcie_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume) + SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume) .resume_noirq = rcar_pcie_resume_noirq, }; @@ -1050,40 +1142,10 @@ static struct platform_driver rcar_pcie_driver = { }; #ifdef CONFIG_ARM -static DEFINE_SPINLOCK(pmsr_lock); static int rcar_pcie_aarch32_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - unsigned long flags; - u32 pmsr, val; - int ret = 0; - - spin_lock_irqsave(&pmsr_lock, flags); - - if (!pcie_base || pm_runtime_suspended(pcie_dev)) { - ret = 1; - goto unlock_exit; - } - - pmsr = readl(pcie_base + PMSR); - - /* - * Test if the PCIe controller received PM_ENTER_L1 DLLP and - * the PCIe controller is not in L1 link state. If true, apply - * fix, which will put the controller into L1 link state, from - * which it can return to L0s/L0 on its own. - */ - if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) { - writel(L1IATN, pcie_base + PMCTLR); - ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, - val & L1FAEG, 10, 1000); - WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); - writel(L1FAEG | PMEL1RX, pcie_base + PMSR); - } - -unlock_exit: - spin_unlock_irqrestore(&pmsr_lock, flags); - return ret; + return !fixup_exception(regs); } static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = { diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 5fb9ce6e536e..d1a200b93b2b 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -264,8 +264,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, struct rockchip_pcie *pcie = &ep->rockchip; u32 r; - r = find_first_zero_bit(&ep->ob_region_map, - sizeof(ep->ob_region_map) * BITS_PER_LONG); + r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); /* * Region 0 is reserved for configuration space and shouldn't * be used elsewhere per TRM, so leave it out. 
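The pcie-rockchip-ep hunk fixes the search length passed to find_first_zero_bit(): ob_region_map is a single unsigned long, so the valid range is BITS_PER_LONG bits, not sizeof() * BITS_PER_LONG (which overstated it eightfold). A sketch of the corrected single-word allocator, with the region-0 reservation expressed via the search offset; alloc_ob_region() is a simplified illustration, not the driver's exact scheme:

#include <linux/bitops.h>
#include <linux/errno.h>

static int alloc_ob_region(unsigned long *map)
{
	/* start at bit 1: region 0 stays reserved for config space */
	unsigned int r = find_next_zero_bit(map, BITS_PER_LONG, 1);

	if (r >= BITS_PER_LONG)
		return -ENOMEM;		/* all regions in use */

	set_bit(r, map);
	return r;
}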
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 45a28880f322..7352b5ff8d35 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -370,7 +370,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, - PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCI_CLASS_BRIDGE_PCI_NORMAL << 8, PCIE_RC_CONFIG_RID_CCR); /* Clear THP cap's next cap pointer to remove L1 substate cap */ @@ -864,7 +864,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) return 0; } -static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) +static int rockchip_pcie_suspend_noirq(struct device *dev) { struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int ret; @@ -889,7 +889,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) return ret; } -static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) +static int rockchip_pcie_resume_noirq(struct device *dev) { struct rockchip_pcie *rockchip = dev_get_drvdata(dev); int err; @@ -1035,8 +1035,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev) } static const struct dev_pm_ops rockchip_pcie_pm_ops = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, - rockchip_pcie_resume_noirq) + NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, + rockchip_pcie_resume_noirq) }; static const struct of_device_id rockchip_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 1650a5087450..32c3a859c26b 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -134,7 +134,6 @@ #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_SCC_SHIFT 16 #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index c7cd44ed4dfc..e4ab48041eb6 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -35,6 +35,10 @@ #define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348 #define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1) +#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0 +#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8 +#define XILINX_CPM_PCIE_IR_LOCAL BIT(0) + /* Interrupt registers definitions */ #define XILINX_CPM_PCIE_INTR_LINK_DOWN 0 #define XILINX_CPM_PCIE_INTR_HOT_RESET 3 @@ -98,6 +102,19 @@ /* Phy Status/Control Register definitions */ #define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11) +enum xilinx_cpm_version { + CPM, + CPM5, +}; + +/** + * struct xilinx_cpm_variant - CPM variant information + * @version: CPM version + */ +struct xilinx_cpm_variant { + enum xilinx_cpm_version version; +}; + /** * struct xilinx_cpm_pcie - PCIe port information * @dev: Device pointer @@ -109,6 +126,7 @@ * @intx_irq: legacy interrupt number * @irq: Error interrupt number * @lock: lock protecting shared register access + * @variant: CPM version check pointer */ struct xilinx_cpm_pcie { struct device *dev; @@ -120,6 +138,7 @@ struct xilinx_cpm_pcie { int intx_irq; int irq; raw_spinlock_t lock; + const struct xilinx_cpm_variant *variant; }; static u32 
pcie_read(struct xilinx_cpm_pcie *port, u32 reg) @@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) generic_handle_domain_irq(port->cpm_domain, i); pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); + if (port->variant->version == CPM5) { + val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS); + if (val) + writel_relaxed(val, port->cpm_base + + XILINX_CPM_PCIE_IR_STATUS); + } + /* * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to * CPM SLCR block. @@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port) */ writel(XILINX_CPM_PCIE_MISC_IR_LOCAL, port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); + + if (port->variant->version == CPM5) { + writel(XILINX_CPM_PCIE_IR_LOCAL, + port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE); + } + /* Enable the Bridge enable bit */ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) | XILINX_CPM_PCIE_REG_RPSC_BEN, @@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port, if (IS_ERR(port->cfg)) return PTR_ERR(port->cfg); - port->reg_base = port->cfg->win; + if (port->variant->version == CPM5) { + port->reg_base = devm_platform_ioremap_resource_byname(pdev, + "cpm_csr"); + if (IS_ERR(port->reg_base)) + return PTR_ERR(port->reg_base); + } else { + port->reg_base = port->cfg->win; + } return 0; } @@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) if (!bus) return -ENODEV; + port->variant = of_device_get_match_data(dev); + err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); @@ -591,8 +632,23 @@ err_parse_dt: return err; } +static const struct xilinx_cpm_variant cpm_host = { + .version = CPM, +}; + +static const struct xilinx_cpm_variant cpm5_host = { + .version = CPM5, +}; + static const struct of_device_id xilinx_cpm_pcie_of_match[] = { - { .compatible = "xlnx,versal-cpm-host-1.00", }, + { + .compatible = "xlnx,versal-cpm-host-1.00", + .data = &cpm_host, + }, + { + .compatible = "xlnx,versal-cpm5-host", + .data = &cpm5_host, + }, {} }; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index cc166c683638..e06e9f4fc50f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -6,7 +6,6 @@ #include <linux/device.h> #include <linux/interrupt.h> -#include <linux/iommu.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> @@ -99,11 +98,13 @@ struct vmd_irq { * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. 
+ * @virq: The underlying VMD Linux interrupt number */ struct vmd_irq_list { struct list_head irq_list; struct srcu_struct srcu; unsigned int count; + unsigned int virq; }; struct vmd_dev { @@ -253,7 +254,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, struct msi_desc *desc = arg->desc; struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); - unsigned int index, vector; if (!vmdirq) return -ENOMEM; @@ -261,10 +261,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, INIT_LIST_HEAD(&vmdirq->node); vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; - index = index_from_irqs(vmd, vmdirq->irq); - vector = pci_irq_vector(vmd->dev, index); - irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, + irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, handle_untracked_irq, vmd, NULL); return 0; } @@ -685,7 +683,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd) return err; INIT_LIST_HEAD(&vmd->irqs[i].irq_list); - err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), + vmd->irqs[i].virq = pci_irq_vector(dev, i); + err = devm_request_irq(&dev->dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) @@ -813,8 +812,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) * acceptable because the guest is usually CPU-limited and MSI * remapping doesn't become a performance bottleneck. */ - if (iommu_capable(vmd->dev->dev.bus, IOMMU_CAP_INTR_REMAP) || - !(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) || + if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) || offset[0] || offset[1]) { ret = vmd_alloc_irqs(vmd); if (ret) @@ -853,6 +851,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd_attach_resources(vmd); if (vmd->irq_domain) dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); + else + dev_set_msi_domain(&vmd->bus->dev, + dev_get_msi_domain(&vmd->dev->dev)); vmd_acpi_begin(); @@ -897,7 +898,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) if (vmd->instance < 0) return vmd->instance; - vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance); + vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d", + vmd->instance); if (!vmd->name) { err = -ENOMEM; goto out_release_instance; @@ -935,7 +937,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) out_release_instance: ida_simple_remove(&vmd_instance_ida, vmd->instance); - kfree(vmd->name); return err; } @@ -958,7 +959,6 @@ static void vmd_remove(struct pci_dev *dev) vmd_detach_resources(vmd); vmd_remove_irq_domain(vmd); ida_simple_remove(&vmd_instance_ida, vmd->instance); - kfree(vmd->name); } #ifdef CONFIG_PM_SLEEP @@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev) int i; for (i = 0; i < vmd->msix_count; i++) - devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); + devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]); return 0; } @@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev) int err, i; for (i = 0; i < vmd->msix_count; i++) { - err = devm_request_irq(dev, pci_irq_vector(pdev, i), + err = devm_request_irq(dev, vmd->irqs[i].virq, vmd_irq, IRQF_NO_THREAD, vmd->name, &vmd->irqs[i]); if (err) @@ -1012,6 +1012,14 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | VMD_FEAT_HAS_BUS_RESTRICTIONS | VMD_FEAT_OFFSET_FIRST_VECTOR,}, + 
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS |
+				VMD_FEAT_OFFSET_FIRST_VECTOR,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS |
+				VMD_FEAT_OFFSET_FIRST_VECTOR,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
 				VMD_FEAT_HAS_BUS_RESTRICTIONS |
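The vmd hunks above store the result of pci_irq_vector() in the new vmd_irq_list::virq field at request time, so suspend/resume and MSI setup reuse the cached number instead of recomputing it. A minimal sketch of that caching pattern; my_irq_list, my_handler and the "my-vmd" name are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_irq_list {
	unsigned int virq;		/* cached Linux IRQ number */
};

static int request_vectors(struct pci_dev *pdev, struct my_irq_list *l,
			   int count, irq_handler_t my_handler, void *data)
{
	int i, err;

	for (i = 0; i < count; i++) {
		l[i].virq = pci_irq_vector(pdev, i);	/* look up once */
		err = devm_request_irq(&pdev->dev, l[i].virq, my_handler,
				       IRQF_NO_THREAD, "my-vmd", data);
		if (err)
			return err;
	}
	return 0;
}

Teardown then frees l[i].virq as well, keeping request and free symmetric without another pci_irq_vector() lookup.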