author      2020-09-01 14:19:48 +0200
committer   2020-09-01 14:19:48 +0200
commit      ead5d1f4d877e92c051e1a1ade623d0d30e71619
tree        cb9db5698a546e7b96f7d5bef5ce544629dd37a2 /drivers/pci/controller/dwc
parent      scif: Fix spelling of EACCES
parent      Merge tag 'docs-5.9-3' of git://git.lwn.net/linux
Merge branch 'master' into for-next
Sync with Linus' branch in order to be able to apply fixups
of more recent patches.
Diffstat (limited to 'drivers/pci/controller/dwc')
24 files changed, 1656 insertions(+), 685 deletions(-)
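
Note on a recurring cleanup in the diff below: many of these drivers drop the two-step platform_get_resource_byname() + devm_ioremap_resource() lookup in favour of the devm_platform_ioremap_resource_byname() helper. The following is a minimal sketch of that before/after shape, not code from this series; the example_probe() name and the "dbi" resource name are placeholders.

/*
 * Minimal sketch of the ioremap cleanup pattern seen throughout this diff.
 * The "example" names and the "dbi" resource name are hypothetical; only
 * the helper calls mirror what the patches do.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *dbi;

	/*
	 * Old pattern removed by these patches:
	 *
	 *	struct resource *res;
	 *
	 *	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	 *	dbi = devm_ioremap_resource(&pdev->dev, res);
	 */

	/* New pattern: one helper looks up the named resource and maps it. */
	dbi = devm_platform_ioremap_resource_byname(pdev, "dbi");
	if (IS_ERR(dbi))
		return PTR_ERR(dbi);

	return 0;
}

The same series also drops redundant dev_err() prints after platform_get_irq(), since that helper already logs an error on failure.
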
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 0830dfcfa43a..044a3761c44f 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -26,7 +26,7 @@ config PCI_DRA7XX_HOST depends on OF && HAS_IOMEM && TI_PIPE3 select PCIE_DW_HOST select PCI_DRA7XX - default y + default y if SOC_DRA7XX help Enables support for the PCIe controller in the DRA7xx SoC to work in host mode. There are two instances of PCIe controller in DRA7xx. @@ -111,7 +111,6 @@ config PCI_KEYSTONE_HOST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST select PCI_KEYSTONE - default y help Enables support for the PCIe controller in the Keystone SoC to work in host mode. The PCI controller on Keystone is based on @@ -248,25 +247,58 @@ config PCI_MESON implement the driver. config PCIE_TEGRA194 - tristate "NVIDIA Tegra194 (and later) PCIe controller" + tristate + +config PCIE_TEGRA194_HOST + tristate "NVIDIA Tegra194 (and later) PCIe controller - Host Mode" depends on ARCH_TEGRA_194_SOC || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST select PHY_TEGRA194_P2U + select PCIE_TEGRA194 + help + Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to + work in host mode. There are two instances of PCIe controllers in + Tegra194. This controller can work either as EP or RC. In order to + enable host-specific features PCIE_TEGRA194_HOST must be selected and + in order to enable device-specific features PCIE_TEGRA194_EP must be + selected. This uses the DesignWare core. + +config PCIE_TEGRA194_EP + tristate "NVIDIA Tegra194 (and later) PCIe controller - Endpoint Mode" + depends on ARCH_TEGRA_194_SOC || COMPILE_TEST + depends on PCI_ENDPOINT + select PCIE_DW_EP + select PHY_TEGRA194_P2U + select PCIE_TEGRA194 help - Say Y here if you want support for DesignWare core based PCIe host - controller found in NVIDIA Tegra194 SoC. + Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to + work in host mode. There are two instances of PCIe controllers in + Tegra194. This controller can work either as EP or RC. In order to + enable host-specific features PCIE_TEGRA194_HOST must be selected and + in order to enable device-specific features PCIE_TEGRA194_EP must be + selected. This uses the DesignWare core. config PCIE_UNIPHIER - bool "Socionext UniPhier PCIe controllers" + bool "Socionext UniPhier PCIe host controllers" depends on ARCH_UNIPHIER || COMPILE_TEST depends on OF && HAS_IOMEM depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST help - Say Y here if you want PCIe controller support on UniPhier SoCs. + Say Y here if you want PCIe host controller support on UniPhier SoCs. This driver supports LD20 and PXs3 SoCs. +config PCIE_UNIPHIER_EP + bool "Socionext UniPhier PCIe endpoint controllers" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF && HAS_IOMEM + depends on PCI_ENDPOINT + select PCIE_DW_EP + help + Say Y here if you want PCIe endpoint controller support on + UniPhier SoCs. This driver supports Pro5 SoC. 
+ config PCIE_AL bool "Amazon Annapurna Labs PCIe controller" depends on OF && (ARM64 || COMPILE_TEST) diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 8a637cfcf6e9..a751553fa0db 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o obj-$(CONFIG_PCI_MESON) += pci-meson.o obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o +obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o # The following drivers are for devices that use the generic ACPI # pci_root.c driver but don't support standard ECAM config access. diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 9bf7fa99b103..dc387724cf08 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -2,7 +2,7 @@ /* * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs * - * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: Kishon Vijay Abraham I <kishon@ti.com> */ @@ -215,10 +215,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp) return 0; } -static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { - .host_init = dra7xx_pcie_host_init, -}; - static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { @@ -233,43 +229,77 @@ static const struct irq_domain_ops intx_domain_ops = { .xlate = pci_irqd_intx_xlate, }; -static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct device *dev = pci->dev; - struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); - struct device_node *node = dev->of_node; - struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + unsigned long val; + int pos, irq; - if (!pcie_intc_node) { - dev_err(dev, "No PCIe Intc node found\n"); - return -ENODEV; - } + val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + + (index * MSI_REG_CTRL_BLOCK_SIZE)); + if (!val) + return 0; - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, pp); - of_node_put(pcie_intc_node); - if (!dra7xx->irq_domain) { - dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return -ENODEV; + pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0); + while (pos != MAX_MSI_IRQS_PER_CTRL) { + irq = irq_find_mapping(pp->irq_domain, + (index * MAX_MSI_IRQS_PER_CTRL) + pos); + generic_handle_irq(irq); + pos++; + pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos); } - return 0; + return 1; } -static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) +static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp) { - struct dra7xx_pcie *dra7xx = arg; - struct dw_pcie *pci = dra7xx->pci; - struct pcie_port *pp = &pci->pp; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + int ret, i, count, num_ctrls; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /** + * Need to make sure all MSI status bits read 0 before exiting. + * Else, new MSI IRQs are not registered by the wrapper. Have an + * upperbound for the loop and exit the IRQ in case of IRQ flood + * to avoid locking up system in interrupt context. 
+ */ + count = 0; + do { + ret = 0; + + for (i = 0; i < num_ctrls; i++) + ret |= dra7xx_pcie_handle_msi(pp, i); + count++; + } while (ret && count <= 1000); + + if (count > 1000) + dev_warn_ratelimited(pci->dev, + "Too many MSI IRQs to handle\n"); +} + +static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct dra7xx_pcie *dra7xx; + struct dw_pcie *pci; + struct pcie_port *pp; unsigned long reg; u32 virq, bit; + chained_irq_enter(chip, desc); + + pp = irq_desc_get_handler_data(desc); + pci = to_dw_pcie_from_pp(pp); + dra7xx = to_dra7xx_pcie(pci); + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); switch (reg) { case MSI: - dw_handle_msi_irq(pp); + dra7xx_pcie_handle_msi_irq(pp); break; case INTA: case INTB: @@ -283,9 +313,7 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) break; } - dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); - - return IRQ_HANDLED; + chained_irq_exit(chip, desc); } static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) @@ -347,6 +375,145 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) return IRQ_HANDLED; } +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct device *dev = pci->dev; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, + pp); + dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, pp); + of_node_put(pcie_intc_node); + if (!dra7xx->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return -ENODEV; + } + + return 0; +} + +static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u64 msi_target; + + msi_target = (u64)pp->msi_data; + + msg->address_lo = lower_32_bits(msi_target); + msg->address_hi = upper_32_bits(msi_target); + + msg->data = d->hwirq; + + dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", + (int)d->hwirq, msg->address_hi, msg->address_lo); +} + +static int dra7xx_pcie_msi_set_affinity(struct irq_data *d, + const struct cpumask *mask, + bool force) +{ + return -EINVAL; +} + +static void dra7xx_pcie_bottom_mask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + pp->irq_mask[ctrl] |= BIT(bit); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, + pp->irq_mask[ctrl]); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dra7xx_pcie_bottom_unmask(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + unsigned long flags; + + raw_spin_lock_irqsave(&pp->lock, flags); + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % 
MAX_MSI_IRQS_PER_CTRL; + + pp->irq_mask[ctrl] &= ~BIT(bit); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, + pp->irq_mask[ctrl]); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static void dra7xx_pcie_bottom_ack(struct irq_data *d) +{ + struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int res, bit, ctrl; + + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; + res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; + + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); +} + +static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = { + .name = "DRA7XX-PCI-MSI", + .irq_ack = dra7xx_pcie_bottom_ack, + .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg, + .irq_set_affinity = dra7xx_pcie_msi_set_affinity, + .irq_mask = dra7xx_pcie_bottom_mask, + .irq_unmask = dra7xx_pcie_bottom_unmask, +}; + +static int dra7xx_pcie_msi_host_init(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u32 ctrl, num_ctrls; + + pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip; + + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + pp->irq_mask[ctrl] = ~0; + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + pp->irq_mask[ctrl]); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + ~0); + } + + return dw_pcie_allocate_domains(pp); +} + +static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { + .host_init = dra7xx_pcie_host_init, + .msi_host_init = dra7xx_pcie_msi_host_init, +}; + static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -426,13 +593,12 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = + devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -459,28 +625,16 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, struct dw_pcie *pci = dra7xx->pci; struct pcie_port *pp = &pci->pp; struct device *dev = pci->dev; - struct resource *res; pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "missing IRQ resource\n"); + if (pp->irq < 0) return pp->irq; - } - - ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, - IRQF_SHARED | IRQF_NO_THREAD, - "dra7-pcie-msi", dra7xx); - if (ret) { - dev_err(dev, "failed to request irq\n"); - return ret; - } ret = dra7xx_pcie_init_irq_domain(pp); if (ret < 0) return ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -681,7 +835,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) struct phy **phy; struct device_link **link; void __iomem *base; - struct resource *res; struct dw_pcie *pci; struct dra7xx_pcie *dra7xx; struct device *dev = &pdev->dev; @@ -713,15 
+866,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); - base = devm_ioremap(dev, res->start, resource_size(res)); - if (!base) - return -ENOMEM; + base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); + if (IS_ERR(base)) + return PTR_ERR(base); phy_count = of_property_count_strings(np, "phy-names"); if (phy_count < 0) { @@ -841,9 +991,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) return 0; err_gpio: - pm_runtime_put(dev); - err_get_sync: + pm_runtime_put(dev); pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c5043d951e80..8d82c43ae299 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -84,14 +84,12 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, { struct dw_pcie *pci = ep->pci; struct device *dev = pci->dev; - struct resource *res; ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); if (!ep->mem_res) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); + ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ep->mem_res->elbi_base)) return PTR_ERR(ep->mem_res->elbi_base); @@ -402,10 +400,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, int ret; pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, IRQF_SHARED, "exynos-pcie", ep); if (ret) { @@ -415,10 +412,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get msi irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &exynos_pcie_host_ops; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index acfbd34032a8..5fef2613b223 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Freescale i.MX6 SoCs * * Copyright (C) 2013 Kosagi - * http://www.kosagi.com + * https://www.kosagi.com * * Author: Sean Cross <xobs@kosagi.com> */ @@ -439,7 +439,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); break; - case IMX6QP: /* FALLTHROUGH */ + case IMX6QP: case IMX6Q: /* power up core phy and enable ref clock */ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, @@ -642,7 +642,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2); - /* FALLTHROUGH */ + fallthrough; default: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); @@ -868,10 
+868,8 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq <= 0) { - dev_err(dev, "failed to get MSI irq\n"); - return -ENODEV; - } + if (pp->msi_irq < 0) + return pp->msi_irq; } pp->ops = &imx6_pcie_host_ops; @@ -1107,7 +1105,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) dev_err(dev, "pcie_aux clock source missing or invalid\n"); return PTR_ERR(imx6_pcie->pcie_aux); } - /* fall through */ + fallthrough; case IMX7D: if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) imx6_pcie->controller_id = 1; @@ -1269,7 +1267,7 @@ static void imx6_pcie_quirk(struct pci_dev *dev) if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) return; - if (bus->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus)) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index c8c702c494a2..c8c9d6a75f17 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Texas Instruments Keystone SoCs * * Copyright (C) 2013-2014 Texas Instruments., Ltd. - * http://www.ti.com + * https://www.ti.com * * Author: Murali Karicheri <m-karicheri2@ti.com> * Implementation based on pci-exynos.c and pcie-designware.c @@ -440,7 +440,7 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -457,7 +457,7 @@ static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -959,6 +959,9 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, case PCI_EPC_IRQ_MSI: dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); break; + case PCI_EPC_IRQ_MSIX: + dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); + break; default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); return -EINVAL; @@ -970,7 +973,7 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, static const struct pci_epc_features ks_pcie_am654_epc_features = { .linkup_notifier = false, .msi_capable = true, - .msix_capable = false, + .msix_capable = true, .reserved_bar = 1 << BAR_0 | 1 << BAR_1, .bar_fixed_64bit = 1 << BAR_0, .bar_fixed_size[2] = SZ_1M, @@ -1247,10 +1250,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev) pci->version = version; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, "ks-pcie-error-irq", ks_pcie); @@ -1320,8 +1321,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) } if (pci->version >= 0x480A) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - atu_base = devm_ioremap_resource(dev, res); + atu_base = devm_platform_ioremap_resource_byname(pdev, "atu"); if (IS_ERR(atu_base)) { ret = PTR_ERR(atu_base); goto err_get_sync; diff --git a/drivers/pci/controller/dwc/pci-meson.c 
b/drivers/pci/controller/dwc/pci-meson.c index 3772b02a5c55..4f183b96afbb 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -66,7 +66,6 @@ #define PORT_CLK_RATE 100000000UL #define MAX_PAYLOAD_SIZE 256 #define MAX_READ_REQ_SIZE 256 -#define MESON_PCIE_PHY_POWERUP 0x1c #define PCIE_RESET_DELAY 500 #define PCIE_SHARED_RESET 1 #define PCIE_NORMAL_RESET 0 @@ -81,26 +80,19 @@ enum pcie_data_rate { struct meson_pcie_mem_res { void __iomem *elbi_base; void __iomem *cfg_base; - void __iomem *phy_base; }; struct meson_pcie_clk_res { struct clk *clk; - struct clk *mipi_gate; struct clk *port_clk; struct clk *general_clk; }; struct meson_pcie_rc_reset { - struct reset_control *phy; struct reset_control *port; struct reset_control *apb; }; -struct meson_pcie_param { - bool has_shared_phy; -}; - struct meson_pcie { struct dw_pcie pci; struct meson_pcie_mem_res mem_res; @@ -108,7 +100,6 @@ struct meson_pcie { struct meson_pcie_rc_reset mrst; struct gpio_desc *reset_gpio; struct phy *phy; - const struct meson_pcie_param *param; }; static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp, @@ -130,13 +121,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; - if (!mp->param->has_shared_phy) { - mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET); - if (IS_ERR(mrst->phy)) - return PTR_ERR(mrst->phy); - reset_control_deassert(mrst->phy); - } - mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET); if (IS_ERR(mrst->port)) return PTR_ERR(mrst->port); @@ -162,22 +146,6 @@ static void __iomem *meson_pcie_get_mem(struct platform_device *pdev, return devm_ioremap_resource(dev, res); } -static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev, - struct meson_pcie *mp, - const char *id) -{ - struct device *dev = mp->pci.dev; - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id); - if (!res) { - dev_err(dev, "No REG resource %s\n", id); - return ERR_PTR(-ENXIO); - } - - return devm_ioremap(dev, res->start, resource_size(res)); -} - static int meson_pcie_get_mems(struct platform_device *pdev, struct meson_pcie *mp) { @@ -189,14 +157,6 @@ static int meson_pcie_get_mems(struct platform_device *pdev, if (IS_ERR(mp->mem_res.cfg_base)) return PTR_ERR(mp->mem_res.cfg_base); - /* Meson AXG SoC has two PCI controllers use same phy register */ - if (!mp->param->has_shared_phy) { - mp->mem_res.phy_base = - meson_pcie_get_mem_shared(pdev, mp, "phy"); - if (IS_ERR(mp->mem_res.phy_base)) - return PTR_ERR(mp->mem_res.phy_base); - } - return 0; } @@ -204,37 +164,33 @@ static int meson_pcie_power_on(struct meson_pcie *mp) { int ret = 0; - if (mp->param->has_shared_phy) { - ret = phy_init(mp->phy); - if (ret) - return ret; + ret = phy_init(mp->phy); + if (ret) + return ret; - ret = phy_power_on(mp->phy); - if (ret) { - phy_exit(mp->phy); - return ret; - } - } else - writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base); + ret = phy_power_on(mp->phy); + if (ret) { + phy_exit(mp->phy); + return ret; + } return 0; } +static void meson_pcie_power_off(struct meson_pcie *mp) +{ + phy_power_off(mp->phy); + phy_exit(mp->phy); +} + static int meson_pcie_reset(struct meson_pcie *mp) { struct meson_pcie_rc_reset *mrst = &mp->mrst; int ret = 0; - if (mp->param->has_shared_phy) { - ret = phy_reset(mp->phy); - if (ret) - return ret; - } else { - reset_control_assert(mrst->phy); - udelay(PCIE_RESET_DELAY); - reset_control_deassert(mrst->phy); - 
udelay(PCIE_RESET_DELAY); - } + ret = phy_reset(mp->phy); + if (ret) + return ret; reset_control_assert(mrst->port); reset_control_assert(mrst->apb); @@ -286,12 +242,6 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp) if (IS_ERR(res->port_clk)) return PTR_ERR(res->port_clk); - if (!mp->param->has_shared_phy) { - res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0); - if (IS_ERR(res->mipi_gate)) - return PTR_ERR(res->mipi_gate); - } - res->general_clk = meson_pcie_probe_clock(dev, "general", 0); if (IS_ERR(res->general_clk)) return PTR_ERR(res->general_clk); @@ -339,11 +289,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp) meson_cfg_writel(mp, val, PCIE_CFG0); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val &= ~LINK_CAPABLE_MASK; + val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE); meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); - val |= LINK_CAPABLE_X1 | FAST_LINK_MODE; + val |= LINK_CAPABLE_X1; meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); @@ -538,10 +488,8 @@ static int meson_add_pcie_port(struct meson_pcie *mp, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &meson_pcie_host_ops; @@ -562,7 +510,6 @@ static const struct dw_pcie_ops dw_pcie_ops = { static int meson_pcie_probe(struct platform_device *pdev) { - const struct meson_pcie_param *match_data; struct device *dev = &pdev->dev; struct dw_pcie *pci; struct meson_pcie *mp; @@ -576,17 +523,10 @@ static int meson_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - match_data = of_device_get_match_data(dev); - if (!match_data) { - dev_err(dev, "failed to get match data\n"); - return -ENODEV; - } - mp->param = match_data; - - if (mp->param->has_shared_phy) { - mp->phy = devm_phy_get(dev, "pcie"); - if (IS_ERR(mp->phy)) - return PTR_ERR(mp->phy); + mp->phy = devm_phy_get(dev, "pcie"); + if (IS_ERR(mp->phy)) { + dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy)); + return PTR_ERR(mp->phy); } mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); @@ -636,30 +576,16 @@ static int meson_pcie_probe(struct platform_device *pdev) return 0; err_phy: - if (mp->param->has_shared_phy) { - phy_power_off(mp->phy); - phy_exit(mp->phy); - } - + meson_pcie_power_off(mp); return ret; } -static struct meson_pcie_param meson_pcie_axg_param = { - .has_shared_phy = false, -}; - -static struct meson_pcie_param meson_pcie_g12a_param = { - .has_shared_phy = true, -}; - static const struct of_device_id meson_pcie_of_match[] = { { .compatible = "amlogic,axg-pcie", - .data = &meson_pcie_axg_param, }, { .compatible = "amlogic,g12a-pcie", - .data = &meson_pcie_g12a_param, }, {}, }; diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index 1eeda2f6371f..d57d4ee15848 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -67,20 +67,15 @@ static int al_pcie_init(struct pci_config_window *cfg) dev_dbg(dev, "Root port dbi res: %pR\n", res); al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(al_pcie->dbi_base)) { - long err = PTR_ERR(al_pcie->dbi_base); - - dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n", - res, err); - return err; - } + if (IS_ERR(al_pcie->dbi_base)) + return PTR_ERR(al_pcie->dbi_base); cfg->priv = al_pcie; return 0; } 
-struct pci_ecam_ops al_pcie_ops = { +const struct pci_ecam_ops al_pcie_ops = { .bus_shift = 20, .init = al_pcie_init, .pci_ops = { @@ -408,10 +403,8 @@ static int al_pcie_probe(struct platform_device *pdev) dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res); + if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - } ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); if (!ecam_res) { diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 49596547e8c2..13901f359a41 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -248,10 +248,8 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq for port\n"); + if (pp->irq < 0) return pp->irq; - } ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, IRQF_SHARED, "armada8k-pcie", pcie); @@ -317,7 +315,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap regs base %p\n", base); ret = PTR_ERR(pci->dbi_base); goto fail_clkreg; } diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 28d5a1095200..97d50bb50f06 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -387,10 +387,8 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &artpec6_pcie_host_ops; @@ -455,8 +453,7 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -481,8 +478,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dw_pcie *pci; struct artpec6_pcie *artpec6_pcie; - struct resource *dbi_base; - struct resource *phy_base; int ret; const struct of_device_id *match; const struct artpec_pcie_of_data *data; @@ -512,13 +507,12 @@ static int artpec6_pcie_probe(struct platform_device *pdev) artpec6_pcie->variant = variant; artpec6_pcie->mode = mode; - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); + artpec6_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(artpec6_pcie->phy_base)) return PTR_ERR(artpec6_pcie->phy_base); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c 
b/drivers/pci/controller/dwc/pcie-designware-ep.c index cfeccd7e9fff..305bfec2424d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Synopsys DesignWare PCIe Endpoint controller driver * * Copyright (C) 2017 Texas Instruments @@ -18,6 +18,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) pci_epc_linkup(epc); } +EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup); + +void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) +{ + struct pci_epc *epc = ep->epc; + + pci_epc_init_notify(epc); +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify); static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, int flags) @@ -125,6 +134,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); clear_bit(atu_index, ep->ib_window_map); + ep->epf_bar[bar] = NULL; } static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, @@ -158,6 +168,7 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, dw_pcie_writel_dbi(pci, reg + 4, 0); } + ep->epf_bar[bar] = epf_bar; dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -269,7 +280,8 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) return val; } -static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno bir, u32 offset) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -278,12 +290,22 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) if (!ep->msix_cap) return -EINVAL; + dw_pcie_dbi_ro_wr_en(pci); + reg = ep->msix_cap + PCI_MSIX_FLAGS; val = dw_pcie_readw_dbi(pci, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; val |= interrupts; - dw_pcie_dbi_ro_wr_en(pci); dw_pcie_writew_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_TABLE; + val = offset | bir; + dw_pcie_writel_dbi(pci, reg, val); + + reg = ep->msix_cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + dw_pcie_writel_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); return 0; @@ -390,11 +412,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, reg = ep->msi_cap + PCI_MSI_DATA_32; msg_data = dw_pcie_readw_dbi(pci, reg); } - aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); + aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); msg_addr = ((u64)msg_addr_upper) << 32 | (msg_addr_lower & ~aligned_offset); ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, - epc->mem->page_size); + epc->mem->window.page_size); if (ret) return ret; @@ -409,55 +431,37 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epf_msix_tbl *msix_tbl; struct pci_epc *epc = ep->epc; - u16 tbl_offset, bir; - u32 bar_addr_upper, bar_addr_lower; - u32 msg_addr_upper, msg_addr_lower; u32 reg, msg_data, vec_ctrl; - u64 tbl_addr, msg_addr, reg_u64; - void __iomem *msix_tbl; + unsigned int aligned_offset; + u32 tbl_offset; + u64 msg_addr; int ret; + u8 bir; reg = ep->msix_cap + PCI_MSIX_TABLE; tbl_offset = dw_pcie_readl_dbi(pci, reg); bir = (tbl_offset & PCI_MSIX_TABLE_BIR); tbl_offset &= PCI_MSIX_TABLE_OFFSET; - reg = PCI_BASE_ADDRESS_0 + (4 * bir); - bar_addr_upper = 0; - bar_addr_lower = dw_pcie_readl_dbi(pci, reg); - reg_u64 = (bar_addr_lower & 
PCI_BASE_ADDRESS_MEM_TYPE_MASK); - if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) - bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); - - tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; - tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); - tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; - - msix_tbl = ioremap(ep->phys_base + tbl_addr, - PCI_MSIX_ENTRY_SIZE); - if (!msix_tbl) - return -EINVAL; - - msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); - msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); - msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; - msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); - vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); - - iounmap(msix_tbl); + msix_tbl = ep->epf_bar[bir]->addr + tbl_offset; + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl; if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); return -EPERM; } - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, - epc->mem->page_size); + aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + epc->mem->window.page_size); if (ret) return ret; - writel(msg_data, ep->msi_mem); + writel(msg_data, ep->msi_mem + aligned_offset); dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); @@ -469,7 +473,7 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep) struct pci_epc *epc = ep->epc; pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, - epc->mem->page_size); + epc->mem->window.page_size); pci_epc_mem_exit(epc); } @@ -492,19 +496,54 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) return 0; } -int dw_pcie_ep_init(struct dw_pcie_ep *ep) +int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) { + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + unsigned int offset; + unsigned int nbars; + u8 hdr_type; + u32 reg; int i; + + hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); + if (hdr_type != PCI_HEADER_TYPE_NORMAL) { + dev_err(pci->dev, + "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", + hdr_type); + return -EIO; + } + + ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); + + ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); + + offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); + if (offset) { + reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); + nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> + PCI_REBAR_CTRL_NBAR_SHIFT; + + dw_pcie_dbi_ro_wr_en(pci); + for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_dbi_ro_wr_dis(pci); + } + + dw_pcie_setup(pci); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete); + +int dw_pcie_ep_init(struct dw_pcie_ep *ep) +{ int ret; - u32 reg; void *addr; - u8 hdr_type; - unsigned int nbars; - unsigned int offset; struct pci_epc *epc; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct device *dev = pci->dev; struct device_node *np = dev->of_node; + const struct pci_epc_features *epc_features; if (!pci->dbi_base || !pci->dbi_base2) { dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); @@ -563,47 +602,30 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) if (ep->ops->ep_init) ep->ops->ep_init(ep); - hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); - if (hdr_type != PCI_HEADER_TYPE_NORMAL) { - dev_err(pci->dev, "PCIe controller is not 
set to EP mode (hdr_type:0x%x)!\n", - hdr_type); - return -EIO; - } - ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; - ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, - ep->page_size); + ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, + ep->page_size); if (ret < 0) { dev_err(dev, "Failed to initialize address space\n"); return ret; } ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, - epc->mem->page_size); + epc->mem->window.page_size); if (!ep->msi_mem) { dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); return -ENOMEM; } - ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); - - ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); - - offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); - if (offset) { - reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> - PCI_REBAR_CTRL_NBAR_SHIFT; - dw_pcie_dbi_ro_wr_en(pci); - for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); - dw_pcie_dbi_ro_wr_dis(pci); + if (ep->ops->get_features) { + epc_features = ep->ops->get_features(ep); + if (epc_features->core_init_notifier) + return 0; } - dw_pcie_setup(pci); - - return 0; + return dw_pcie_ep_init_complete(ep); } +EXPORT_SYMBOL_GPL(dw_pcie_ep_init); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 395feb8ca051..9dafecba347f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -236,7 +236,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); - struct pcie_port *pp = irq_data_get_irq_chip_data(d); + struct pcie_port *pp = domain->host_data; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); @@ -264,6 +264,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) return -ENOMEM; } + irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); + pp->msi_domain = pci_msi_create_irq_domain(fwnode, &dw_pcie_msi_domain_info, pp->irq_domain); @@ -344,11 +346,6 @@ int dw_pcie_host_init(struct pcie_port *pp) if (!bridge) return -ENOMEM; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &bridge->windows) { switch (resource_type(win->res)) { @@ -471,14 +468,8 @@ int dw_pcie_host_init(struct pcie_port *pp) goto err_free_msi; } - pp->root_bus_nr = pp->busn->start; - - bridge->dev.parent = dev; bridge->sysdata = pp; - bridge->busnr = pp->root_bus_nr; bridge->ops = &dw_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = pci_scan_root_bus_bridge(bridge); if (ret) @@ -527,7 +518,7 @@ static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus->parent)) { type = PCIE_ATU_TYPE_CFG0; cpu_addr = pp->cfg0_base; cfg_size = 
pp->cfg0_size; @@ -583,13 +574,11 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, struct dw_pcie *pci = to_dw_pcie_from_pp(pp); /* If there is no link, then there is no device */ - if (bus->number != pp->root_bus_nr) { + if (!pci_is_root_bus(bus)) { if (!dw_pcie_link_up(pci)) return 0; - } - - /* Access only one slot on each root port */ - if (bus->number == pp->root_bus_nr && dev > 0) + } else if (dev > 0) + /* Access only one slot on each root port */ return 0; return 1; @@ -605,7 +594,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_rd_own_conf(pp, where, size, val); return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); @@ -619,7 +608,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_wr_own_conf(pp, where, size, val); return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 73646b677aff..712456f6ce36 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -153,8 +153,7 @@ static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 681548c88282..b723e0cc41fb 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -244,13 +244,16 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, u64 pci_addr, u32 size) { u32 retries, val; + u64 limit_addr = cpu_addr + size - 1; dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, lower_32_bits(cpu_addr)); dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, upper_32_bits(cpu_addr)); - dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, - lower_32_bits(cpu_addr + size - 1)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT, + lower_32_bits(limit_addr)); + dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT, + upper_32_bits(limit_addr)); dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, lower_32_bits(pci_addr)); dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index a22ea5982817..f911760dcc69 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
- * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -112,9 +112,10 @@ #define PCIE_ATU_UNR_REGION_CTRL2 0x04 #define PCIE_ATU_UNR_LOWER_BASE 0x08 #define PCIE_ATU_UNR_UPPER_BASE 0x0C -#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_LIMIT 0x10 #define PCIE_ATU_UNR_LOWER_TARGET 0x14 #define PCIE_ATU_UNR_UPPER_TARGET 0x18 +#define PCIE_ATU_UNR_UPPER_LIMIT 0x20 /* * The default address offset between dbi_base and atu_base. Root controller @@ -172,7 +173,6 @@ struct dw_pcie_host_ops { }; struct pcie_port { - u8 root_bus_nr; u64 cfg0_base; void __iomem *va_cfg0_base; u32 cfg0_size; @@ -233,6 +233,7 @@ struct dw_pcie_ep { phys_addr_t msi_mem_phys; u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ + struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; }; struct dw_pcie_ops { @@ -411,6 +412,8 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp) #ifdef CONFIG_PCIE_DW_EP void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); int dw_pcie_ep_init(struct dw_pcie_ep *ep); +int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep); +void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep); void dw_pcie_ep_exit(struct dw_pcie_ep *ep); int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -428,6 +431,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) return 0; } +static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) +{ + return 0; +} + +static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep) +{ +} + static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { } diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c index 6d9e1b2b8f7b..5ca86796d43a 100644 --- a/drivers/pci/controller/dwc/pcie-hisi.c +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -10,15 +10,10 @@ */ #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/mfd/syscon.h> -#include <linux/of_address.h> -#include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/of_device.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> -#include <linux/regmap.h> #include "../../pci.h" #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) @@ -104,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg) return 0; } -struct pci_ecam_ops hisi_pcie_ops = { +const struct pci_ecam_ops hisi_pcie_ops = { .bus_shift = 20, .init = hisi_pcie_init, .pci_ops = { @@ -118,229 +113,6 @@ struct pci_ecam_ops hisi_pcie_ops = { #ifdef CONFIG_PCI_HISI -#include "pcie-designware.h" - -#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 -#define PCIE_HIP06_CTRL_OFF 0x1000 -#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) -#define PCIE_LTSSM_LINKUP_STATE 0x11 -#define PCIE_LTSSM_STATE_MASK 0x3F - -#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) - -struct hisi_pcie; - -struct pcie_soc_ops { - int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); -}; - -struct hisi_pcie { - struct dw_pcie *pci; - struct regmap *subctrl; - u32 port_id; - const struct pcie_soc_ops *soc_ops; -}; - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, - u32 *val) -{ - u32 reg; - u32 reg_val; - void *walker = ®_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - reg_val = dw_pcie_readl_dbi(pci, reg); - - if (size == 1) - *val = *(u8 __force 
*) walker; - else if (size == 2) - *val = *(u16 __force *) walker; - else if (size == 4) - *val = reg_val; - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, - u32 val) -{ - u32 reg_val; - u32 reg; - void *walker = ®_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - if (size == 4) - dw_pcie_writel_dbi(pci, reg, val); - else if (size == 2) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u16 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else if (size == 1) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u8 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) -{ - u32 val; - - regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + - 0x100 * hisi_pcie->port_id, &val); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) -{ - struct dw_pcie *pci = hisi_pcie->pci; - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up(struct dw_pcie *pci) -{ - struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); - - return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); -} - -static const struct dw_pcie_host_ops hisi_pcie_host_ops = { - .rd_own_conf = hisi_pcie_cfg_read, - .wr_own_conf = hisi_pcie_cfg_write, -}; - -static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = hisi_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - u32 port_id; - - if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { - dev_err(dev, "failed to read port-id\n"); - return -EINVAL; - } - if (port_id > 3) { - dev_err(dev, "Invalid port-id: %d\n", port_id); - return -EINVAL; - } - hisi_pcie->port_id = port_id; - - pp->ops = &hisi_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = hisi_pcie_link_up, -}; - -static int hisi_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct hisi_pcie *hisi_pcie; - struct resource *reg; - int ret; - - hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); - if (!hisi_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - hisi_pcie->pci = pci; - - hisi_pcie->soc_ops = of_device_get_match_data(dev); - - hisi_pcie->subctrl = - syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); - if (IS_ERR(hisi_pcie->subctrl)) { - dev_err(dev, "cannot get subctrl base\n"); - return PTR_ERR(hisi_pcie->subctrl); - } - - reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - platform_set_drvdata(pdev, hisi_pcie); - - ret = hisi_add_pcie_port(hisi_pcie, pdev); - if (ret) - return ret; - - return 0; -} - -static struct pcie_soc_ops hip05_ops = { - 
&hisi_pcie_link_up_hip05 -}; - -static struct pcie_soc_ops hip06_ops = { - &hisi_pcie_link_up_hip06 -}; - -static const struct of_device_id hisi_pcie_of_match[] = { - { - .compatible = "hisilicon,hip05-pcie", - .data = (void *) &hip05_ops, - }, - { - .compatible = "hisilicon,hip06-pcie", - .data = (void *) &hip06_ops, - }, - {}, -}; - -static struct platform_driver hisi_pcie_driver = { - .probe = hisi_pcie_probe, - .driver = { - .name = "hisi-pcie", - .of_match_table = hisi_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(hisi_pcie_driver); - -static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct pci_ecam_ops *ops; - - ops = (struct pci_ecam_ops *)of_device_get_match_data(dev); - return pci_host_common_probe(pdev, ops); -} - static int hisi_pcie_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; @@ -362,7 +134,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg) return 0; } -struct pci_ecam_ops hisi_pcie_platform_ops = { +static const struct pci_ecam_ops hisi_pcie_platform_ops = { .bus_shift = 20, .init = hisi_pcie_platform_init, .pci_ops = { @@ -375,17 +147,17 @@ struct pci_ecam_ops hisi_pcie_platform_ops = { static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { { .compatible = "hisilicon,hip06-pcie-ecam", - .data = (void *) &hisi_pcie_platform_ops, + .data = &hisi_pcie_platform_ops, }, { .compatible = "hisilicon,hip07-pcie-ecam", - .data = (void *) &hisi_pcie_platform_ops, + .data = &hisi_pcie_platform_ops, }, {}, }; static struct platform_driver hisi_pcie_almost_ecam_driver = { - .probe = hisi_pcie_almost_ecam_probe, + .probe = pci_host_common_probe, .driver = { .name = "hisi-pcie-almost-ecam", .of_match_table = hisi_pcie_almost_ecam_of_match, diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 811b5c6d62ea..2a2835746077 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -304,7 +304,6 @@ static int histb_pcie_probe(struct platform_device *pdev) struct histb_pcie *hipcie; struct dw_pcie *pci; struct pcie_port *pp; - struct resource *res; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; enum of_gpio_flags of_flags; @@ -324,15 +323,13 @@ static int histb_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); - hipcie->ctrl = devm_ioremap_resource(dev, res); + hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control"); if (IS_ERR(hipcie->ctrl)) { dev_err(dev, "cannot get control reg base\n"); return PTR_ERR(hipcie->ctrl); } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi"); if (IS_ERR(pci->dbi_base)) { dev_err(dev, "cannot get rc-dbi base\n"); return PTR_ERR(pci->dbi_base); @@ -402,10 +399,8 @@ static int histb_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "Failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } hipcie->phy = devm_phy_get(dev, "phy"); diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index fc2a12212dec..c3b3a1d162b5 100644 --- 
a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -253,11 +253,9 @@ static int intel_pcie_get_resources(struct platform_device *pdev) struct intel_pcie_port *lpp = platform_get_drvdata(pdev); struct dw_pcie *pci = &lpp->pci; struct device *dev = pci->dev; - struct resource *res; int ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -291,8 +289,7 @@ static int intel_pcie_get_resources(struct platform_device *pdev) ret = of_pci_get_max_link_speed(dev->of_node); lpp->link_gen = ret < 0 ? 0 : ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); - lpp->app_base = devm_ioremap_resource(dev, res); + lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); if (IS_ERR(lpp->app_base)) return PTR_ERR(lpp->app_base); @@ -453,7 +450,7 @@ static int intel_pcie_msi_init(struct pcie_port *pp) return 0; } -u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) +static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr) { return cpu_addr + BUS_IATU_OFFSET; } diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index c19617a912bd..e496f51e0152 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Kirin Phone SoCs * * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. - * http://www.huawei.com + * https://www.huawei.com * * Author: Xiaowei Song <songxiaowei@huawei.com> */ @@ -147,23 +147,18 @@ static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct resource *apb; - struct resource *phy; - struct resource *dbi; - - apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); - kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + kirin_pcie->apb_base = + devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(kirin_pcie->apb_base)) return PTR_ERR(kirin_pcie->apb_base); - phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + kirin_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(kirin_pcie->phy_base)) return PTR_ERR(kirin_pcie->phy_base); - dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + kirin_pcie->pci->dbi_base = + devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(kirin_pcie->pci->dbi_base)) return PTR_ERR(kirin_pcie->pci->dbi_base); @@ -455,11 +450,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci, if (IS_ENABLED(CONFIG_PCI_MSI)) { irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, - "failed to get MSI IRQ (%d)\n", irq); + if (irq < 0) return irq; - } pci->pp.msi_irq = irq; } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 5ea527a6bd9f..3aac77a295ba 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/types.h> +#include "../../pci.h" #include "pcie-designware.h" #define PCIE20_PARF_SYS_CTRL 0x00 @@ -39,13 +40,14 @@ #define L23_CLK_RMV_DIS BIT(2) #define 
L1_CLK_RMV_DIS BIT(1) -#define PCIE20_COMMAND_STATUS 0x04 -#define CMD_BME_VAL 0x4 -#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 -#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 - #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -66,8 +68,8 @@ #define CFG_BRIDGE_SB_INIT BIT(0) #define PCIE20_CAP 0x70 -#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) -#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2) +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP) #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) #define PCIE_CAP_LINK1_VAL 0x2FD7F @@ -77,22 +79,36 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 +#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 + #define DEVICE_TYPE_RC 0x4 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 +#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 struct qcom_pcie_resources_2_1_0 { - struct clk *iface_clk; - struct clk *core_clk; - struct clk *phy_clk; + struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; struct reset_control *pci_reset; struct reset_control *axi_reset; struct reset_control *ahb_reset; struct reset_control *por_reset; struct reset_control *phy_reset; + struct reset_control *ext_reset; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; @@ -177,6 +193,7 @@ struct qcom_pcie { struct phy *phy; struct gpio_desc *reset; const struct qcom_pcie_ops *ops; + int gen; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) @@ -234,17 +251,21 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (ret) return ret; - res->iface_clk = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface_clk)) - return PTR_ERR(res->iface_clk); + res->clks[0].id = "iface"; + res->clks[1].id = "core"; + res->clks[2].id = "phy"; + res->clks[3].id = "aux"; + res->clks[4].id = "ref"; - res->core_clk = devm_clk_get(dev, "core"); - if (IS_ERR(res->core_clk)) - return PTR_ERR(res->core_clk); + /* iface, core, phy are required */ + ret = devm_clk_bulk_get(dev, 3, res->clks); + if (ret < 0) + return ret; - res->phy_clk = devm_clk_get(dev, "phy"); - if (IS_ERR(res->phy_clk)) - return PTR_ERR(res->phy_clk); + /* aux, ref are optional */ + ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); + if (ret < 0) + return ret; res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) @@ -262,6 +283,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + res->phy_reset = 
devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } @@ -270,14 +295,13 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); reset_control_assert(res->ahb_reset); reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); - clk_disable_unprepare(res->iface_clk); - clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } @@ -286,6 +310,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -295,73 +320,85 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return ret; } - ret = reset_control_assert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot assert ahb reset\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->iface_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - - ret = clk_prepare_enable(res->core_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable core clock\n"); - goto err_clk_core; - } - ret = reset_control_deassert(res->ahb_reset); if (ret) { dev_err(dev, "cannot deassert ahb reset\n"); goto err_deassert_ahb; } - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* enable external reference clock */ - val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); - writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot deassert ext reset\n"); + goto err_deassert_ext; + } ret = reset_control_deassert(res->phy_reset); if (ret) { dev_err(dev, "cannot deassert phy reset\n"); - return ret; + goto err_deassert_phy; } ret = reset_control_deassert(res->pci_reset); if (ret) { dev_err(dev, "cannot deassert pci reset\n"); - return ret; + goto err_deassert_pci; } ret = reset_control_deassert(res->por_reset); if (ret) { dev_err(dev, "cannot deassert por reset\n"); - return ret; + goto err_deassert_por; } ret = reset_control_deassert(res->axi_reset); if (ret) { dev_err(dev, "cannot deassert axi reset\n"); - return ret; + goto err_deassert_axi; } + ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); + if (ret) + goto err_clks; + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || + of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, 
"qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + + /* enable external reference clock */ + val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + /* wait for clock acquisition */ usleep_range(1000, 1500); + if (pcie->gen == 1) { + val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + val |= PCI_EXP_LNKSTA_CLS_2_5GB; + writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + } /* Set the Max TLP size to 2K, instead of using default of 4K */ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, @@ -371,13 +408,19 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return 0; +err_clks: + reset_control_assert(res->axi_reset); +err_deassert_axi: + reset_control_assert(res->por_reset); +err_deassert_por: + reset_control_assert(res->pci_reset); +err_deassert_pci: + reset_control_assert(res->phy_reset); +err_deassert_phy: + reset_control_assert(res->ext_reset); +err_deassert_ext: + reset_control_assert(res->ahb_reset); err_deassert_ahb: - clk_disable_unprepare(res->core_clk); -err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: - clk_disable_unprepare(res->iface_clk); -err_assert_ahb: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; @@ -1047,15 +1090,15 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) pcie->parf + PCIE20_PARF_SYS_CTRL); writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); - writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); + writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; + val &= ~PCI_EXP_LNKCAP_ASPMS; writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + + writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + PCIE20_DEVICE_CONTROL2_STATUS2); return 0; @@ -1339,10 +1382,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_disable(dev); - return ret; - } + if (ret < 0) + goto err_pm_runtime_put; pci->dev = dev; pci->ops = &dw_pcie_ops; @@ -1358,8 +1399,11 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); - pcie->parf = devm_ioremap_resource(dev, res); + pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node); + if (pcie->gen < 0) + pcie->gen = 2; + + pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); goto err_pm_runtime_put; @@ -1372,8 +1416,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); - pcie->elbi = devm_ioremap_resource(dev, res); + pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(pcie->elbi)) { ret = PTR_ERR(pcie->elbi); goto err_pm_runtime_put; @@ -1426,6 +1469,7 @@ err_pm_runtime_put: static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, { 
.compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, @@ -1439,7 +1483,13 @@ static void qcom_fixup_class(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); static struct platform_driver qcom_pcie_driver = { .probe = qcom_pcie_probe, diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 7d0cdfd8138b..62846562da0b 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -198,10 +198,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, int ret; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "spear1340-pcie", spear13xx_pcie); @@ -273,7 +272,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); ret = PTR_ERR(pci->dbi_base); goto fail_clk; } diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index cbe95f0ea0ca..70498689d0c0 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -11,6 +11,7 @@ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -53,6 +54,7 @@ #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0) #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4) #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8) +#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15) #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19) #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30) #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31) @@ -60,19 +62,26 @@ #define APPL_INTR_STATUS_L0 0xC #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0) #define APPL_INTR_STATUS_L0_INT_INT BIT(8) +#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15) +#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16) #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18) #define APPL_INTR_EN_L1_0_0 0x1C #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1) +#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3) +#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30) #define APPL_INTR_STATUS_L1_0_0 0x20 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1) +#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3) +#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE 
BIT(30) #define APPL_INTR_STATUS_L1_1 0x2C #define APPL_INTR_STATUS_L1_2 0x30 #define APPL_INTR_STATUS_L1_3 0x34 #define APPL_INTR_STATUS_L1_6 0x3C #define APPL_INTR_STATUS_L1_7 0x40 +#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1) #define APPL_INTR_EN_L1_8_0 0x44 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2) @@ -103,8 +112,12 @@ #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1) #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0) +#define APPL_MSI_CTRL_1 0xAC + #define APPL_MSI_CTRL_2 0xB0 +#define APPL_LEGACY_INTX 0xB8 + #define APPL_LTR_MSG_1 0xC4 #define LTR_MSG_REQ BIT(15) #define LTR_MST_NO_SNOOP_SHIFT 16 @@ -205,6 +218,13 @@ #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2 +#define MSIX_ADDR_MATCH_LOW_OFF 0x940 +#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0) +#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2) + +#define MSIX_ADDR_MATCH_HIGH_OFF 0x944 +#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0) + #define PORT_LOGIC_MSIX_DOORBELL 0x948 #define CAP_SPCIE_CAP_OFF 0x154 @@ -223,6 +243,13 @@ #define GEN3_CORE_CLK_FREQ 250000000 #define GEN4_CORE_CLK_FREQ 500000000 +#define LTR_MSG_TIMEOUT (100 * 1000) + +#define PERST_DEBOUNCE_TIME (5 * 1000) + +#define EP_STATE_DISABLED 0 +#define EP_STATE_ENABLED 1 + static const unsigned int pcie_gen_freq[] = { GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, @@ -260,6 +287,8 @@ struct tegra_pcie_dw { struct dw_pcie pci; struct tegra_bpmp *bpmp; + enum dw_pcie_device_mode mode; + bool supports_clkreq; bool enable_cdm_check; bool link_state; @@ -283,6 +312,16 @@ struct tegra_pcie_dw { struct phy **phys; struct dentry *debugfs; + + /* Endpoint mode specific */ + struct gpio_desc *pex_rst_gpiod; + struct gpio_desc *pex_refclk_sel_gpiod; + unsigned int pex_rst_irq; + int ep_state; +}; + +struct tegra_pcie_dw_of_data { + enum dw_pcie_device_mode mode; }; static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci) @@ -339,8 +378,9 @@ static void apply_bad_link_workaround(struct pcie_port *pp) } } -static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) +static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg) { + struct tegra_pcie_dw *pcie = arg; struct dw_pcie *pci = &pcie->pci; struct pcie_port *pp = &pci->pp; u32 val, tmp; @@ -411,11 +451,121 @@ static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie) return IRQ_HANDLED; } -static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg) +static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie) +{ + u32 val; + + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2); + + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, 
APPL_CTRL); +} + +static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + struct dw_pcie *pci = &pcie->pci; + u32 val, speed; + + speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & + PCI_EXP_LNKSTA_CLS; + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + /* If EP doesn't advertise L1SS, just return */ + val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); + if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) + return IRQ_HANDLED; + + /* Check if BME is set to '1' */ + val = dw_pcie_readl_dbi(pci, PCI_COMMAND); + if (val & PCI_COMMAND_MASTER) { + ktime_t timeout; + + /* 110us for both snoop and no-snoop */ + val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ; + val |= (val << LTR_MST_NO_SNOOP_SHIFT); + appl_writel(pcie, val, APPL_LTR_MSG_1); + + /* Send LTR upstream */ + val = appl_readl(pcie, APPL_LTR_MSG_2); + val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; + appl_writel(pcie, val, APPL_LTR_MSG_2); + + timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT); + for (;;) { + val = appl_readl(pcie, APPL_LTR_MSG_2); + if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)) + break; + if (ktime_after(ktime_get(), timeout)) + break; + usleep_range(1000, 1100); + } + if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE) + dev_err(pcie->dev, "Failed to send LTR message\n"); + } + + return IRQ_HANDLED; +} + +static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg) { struct tegra_pcie_dw *pcie = arg; + struct dw_pcie_ep *ep = &pcie->pci.ep; + int spurious = 1; + u32 val, tmp; - return tegra_pcie_rp_irq_handler(pcie); + val = appl_readl(pcie, APPL_INTR_STATUS_L0); + if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); + + if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE) + pex_ep_event_hot_rst_done(pcie); + + if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) { + tmp = appl_readl(pcie, APPL_LINK_STATUS); + if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) { + dev_dbg(pcie->dev, "Link is up with Host\n"); + dw_pcie_ep_linkup(ep); + } + } + + spurious = 0; + } + + if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) { + val = appl_readl(pcie, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, val, APPL_INTR_STATUS_L1_15); + + if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED) + return IRQ_WAKE_THREAD; + + spurious = 0; + } + + if (spurious) { + dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n", + val); + appl_writel(pcie, val, APPL_INTR_STATUS_L0); + } + + return IRQ_HANDLED; } static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size, @@ -884,8 +1034,26 @@ static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp) pp->num_vectors = MAX_MSI_IRQS; } +static int tegra_pcie_dw_start_link(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + enable_irq(pcie->pex_rst_irq); + + return 0; +} + +static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) +{ + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + disable_irq(pcie->pex_rst_irq); +} + static const struct dw_pcie_ops tegra_dw_pcie_ops = { .link_up = tegra_pcie_dw_link_up, + .start_link = tegra_pcie_dw_start_link, + .stop_link = tegra_pcie_dw_stop_link, }; static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { @@ -986,6 +1154,40 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie) pcie->enable_cdm_check = of_property_read_bool(np, "snps,enable-cdm-check"); + if (pcie->mode == DW_PCIE_RC_TYPE) + return 0; + + /* Endpoint mode specific DT 
entries */ + pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); + if (IS_ERR(pcie->pex_rst_gpiod)) { + int err = PTR_ERR(pcie->pex_rst_gpiod); + const char *level = KERN_ERR; + + if (err == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, pcie->dev, + dev_fmt("Failed to get PERST GPIO: %d\n"), + err); + return err; + } + + pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev, + "nvidia,refclk-select", + GPIOD_OUT_HIGH); + if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { + int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); + const char *level = KERN_ERR; + + if (err == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, pcie->dev, + dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), + err); + pcie->pex_refclk_sel_gpiod = NULL; + } + return 0; } @@ -1017,6 +1219,34 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, return tegra_bpmp_transfer(pcie->bpmp, &msg); } +static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, + bool enable) +{ + struct mrq_uphy_response resp; + struct tegra_bpmp_message msg; + struct mrq_uphy_request req; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + if (enable) { + req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; + req.ep_ctrlr_pll_init.ep_controller = pcie->cid; + } else { + req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; + req.ep_ctrlr_pll_off.ep_controller = pcie->cid; + } + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_UPHY; + msg.tx.data = &req; + msg.tx.size = sizeof(req); + msg.rx.data = &resp; + msg.rx.size = sizeof(resp); + + return tegra_bpmp_transfer(pcie->bpmp, &msg); +} + static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie) { struct pcie_port *pp = &pcie->pci.pp; @@ -1393,7 +1623,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) ret = pinctrl_pm_select_default_state(dev); if (ret < 0) { dev_err(dev, "Failed to configure sideband pins: %d\n", ret); - goto fail_pinctrl; + goto fail_pm_get_sync; } tegra_pcie_init_controller(pcie); @@ -1420,15 +1650,402 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) fail_host_init: tegra_pcie_deinit_controller(pcie); -fail_pinctrl: - pm_runtime_put_sync(dev); fail_pm_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; } +static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) +{ + u32 val; + int ret; + + if (pcie->ep_state == EP_STATE_DISABLED) + return; + + /* Disable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val &= ~APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, + ((val & APPL_DEBUG_LTSSM_STATE_MASK) >> + APPL_DEBUG_LTSSM_STATE_SHIFT) == + LTSSM_STATE_PRE_DETECT, + 1, LTSSM_TIMEOUT); + if (ret) + dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret); + + reset_control_assert(pcie->core_rst); + + tegra_pcie_disable_phy(pcie); + + reset_control_assert(pcie->core_apb_rst); + + clk_disable_unprepare(pcie->core_clk); + + pm_runtime_put_sync(pcie->dev); + + ret = tegra_pcie_bpmp_set_pll_state(pcie, false); + if (ret) + dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); + + pcie->ep_state = EP_STATE_DISABLED; + dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); +} + +static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) +{ + struct dw_pcie *pci = &pcie->pci; + struct dw_pcie_ep *ep = &pci->ep; + struct device *dev = pcie->dev; + u32 val; + int ret; + + if (pcie->ep_state == EP_STATE_ENABLED) + return; + + ret = pm_runtime_get_sync(dev); + 
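
The endpoint-mode device-tree parsing for Tegra194 above demotes its log message to debug level when devm_gpiod_get() returns -EPROBE_DEFER, so ordinary probe deferral is not reported as an error. The following is a minimal sketch of that pattern, not the driver's code; the "example" GPIO name and the example_get_gpio() helper are invented for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>

/* Hypothetical helper: look up an "example" GPIO and lower the message
 * severity when the GPIO provider has not probed yet. */
static int example_get_gpio(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get(dev, "example", GPIOD_IN);
        if (IS_ERR(gpiod)) {
                int err = PTR_ERR(gpiod);
                const char *level = KERN_ERR;

                if (err == -EPROBE_DEFER)
                        level = KERN_DEBUG;

                dev_printk(level, dev, "failed to get example GPIO: %d\n",
                           err);
                return err;
        }

        *out = gpiod;
        return 0;
}
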
if (ret < 0) { + dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", + ret); + return; + } + + ret = tegra_pcie_bpmp_set_pll_state(pcie, true); + if (ret) { + dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); + goto fail_pll_init; + } + + ret = clk_prepare_enable(pcie->core_clk); + if (ret) { + dev_err(dev, "Failed to enable core clock: %d\n", ret); + goto fail_core_clk_enable; + } + + ret = reset_control_deassert(pcie->core_apb_rst); + if (ret) { + dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); + goto fail_core_apb_rst; + } + + ret = tegra_pcie_enable_phy(pcie); + if (ret) { + dev_err(dev, "Failed to enable PHY: %d\n", ret); + goto fail_phy; + } + + /* Clear any stale interrupt statuses */ + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); + appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); + + /* configure this core for EP mode operation */ + val = appl_readl(pcie, APPL_DM_TYPE); + val &= ~APPL_DM_TYPE_MASK; + val |= APPL_DM_TYPE_EP; + appl_writel(pcie, val, APPL_DM_TYPE); + + appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); + + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_SYS_PRE_DET_STATE; + val |= APPL_CTRL_HW_HOT_RST_EN; + appl_writel(pcie, val, APPL_CTRL); + + val = appl_readl(pcie, APPL_CFG_MISC); + val |= APPL_CFG_MISC_SLV_EP_MODE; + val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); + appl_writel(pcie, val, APPL_CFG_MISC); + + val = appl_readl(pcie, APPL_PINMUX); + val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; + val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; + appl_writel(pcie, val, APPL_PINMUX); + + appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, + APPL_CFG_BASE_ADDR); + + appl_writel(pcie, pcie->atu_dma_res->start & + APPL_CFG_IATU_DMA_BASE_ADDR_MASK, + APPL_CFG_IATU_DMA_BASE_ADDR); + + val = appl_readl(pcie, APPL_INTR_EN_L0_0); + val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; + val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; + val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L0_0); + + val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); + val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; + val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; + appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); + + reset_control_deassert(pcie->core_rst); + + if (pcie->update_fc_fixup) { + val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); + val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; + dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); + } + + config_gen3_gen4_eq_presets(pcie); + + init_host_aspm(pcie); + + /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */ + if (!pcie->supports_clkreq) { + disable_aspm_l11(pcie); + disable_aspm_l12(pcie); + } + + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + 
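
Both the reworked qcom_pcie_init_2_1_0() and the Tegra194 endpoint bring-up above follow the same discipline: enable each clock bundle or reset in sequence and, on failure, unwind whatever has already been enabled in reverse order through a ladder of goto labels. A compact sketch of that discipline is shown below; example_power_up() and its three-clock bundle are hypothetical and do not reproduce either driver's exact ordering.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/reset.h>

#define EXAMPLE_NUM_CLKS        3

/* Hypothetical bring-up helper: enable a clock bundle, then release two
 * resets, unwinding in strict reverse order on any failure. */
static int example_power_up(struct device *dev, struct clk_bulk_data *clks,
                            struct reset_control *ahb_rst,
                            struct reset_control *axi_rst)
{
        int ret;

        ret = clk_bulk_prepare_enable(EXAMPLE_NUM_CLKS, clks);
        if (ret)
                return ret;

        ret = reset_control_deassert(ahb_rst);
        if (ret)
                goto err_clks;

        ret = reset_control_deassert(axi_rst);
        if (ret)
                goto err_ahb;

        return 0;

err_ahb:
        reset_control_assert(ahb_rst);
err_clks:
        clk_bulk_disable_unprepare(EXAMPLE_NUM_CLKS, clks);
        return ret;
}
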
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + + /* Configure N_FTS & FTS */ + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); + val &= ~(N_FTS_MASK << N_FTS_SHIFT); + val |= N_FTS_VAL << N_FTS_SHIFT; + dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); + + val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); + val &= ~FTS_MASK; + val |= FTS_VAL; + dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); + + /* Configure Max Speed from DT */ + if (pcie->max_speed && pcie->max_speed != -EINVAL) { + val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_SLS; + val |= pcie->max_speed; + dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, + val); + } + + pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, + PCI_CAP_ID_EXP); + clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); + + val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); + val |= MSIX_ADDR_MATCH_LOW_OFF_EN; + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); + val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); + dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); + + ret = dw_pcie_ep_init_complete(ep); + if (ret) { + dev_err(dev, "Failed to complete initialization: %d\n", ret); + goto fail_init_complete; + } + + dw_pcie_ep_init_notify(ep); + + /* Enable LTSSM */ + val = appl_readl(pcie, APPL_CTRL); + val |= APPL_CTRL_LTSSM_EN; + appl_writel(pcie, val, APPL_CTRL); + + pcie->ep_state = EP_STATE_ENABLED; + dev_dbg(dev, "Initialization of endpoint is completed\n"); + + return; + +fail_init_complete: + reset_control_assert(pcie->core_rst); + tegra_pcie_disable_phy(pcie); +fail_phy: + reset_control_assert(pcie->core_apb_rst); +fail_core_apb_rst: + clk_disable_unprepare(pcie->core_clk); +fail_core_clk_enable: + tegra_pcie_bpmp_set_pll_state(pcie, false); +fail_pll_init: + pm_runtime_put_sync(dev); +} + +static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) +{ + struct tegra_pcie_dw *pcie = arg; + + if (gpiod_get_value(pcie->pex_rst_gpiod)) + pex_ep_event_pex_rst_assert(pcie); + else + pex_ep_event_pex_rst_deassert(pcie); + + return IRQ_HANDLED; +} + +static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + /* Tegra194 supports only INTA */ + if (irq > 1) + return -EINVAL; + + appl_writel(pcie, 1, APPL_LEGACY_INTX); + usleep_range(1000, 2000); + appl_writel(pcie, 0, APPL_LEGACY_INTX); + return 0; +} + +static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + if (unlikely(irq > 31)) + return -EINVAL; + + appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1); + + return 0; +} + +static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) +{ + struct dw_pcie_ep *ep = &pcie->pci.ep; + + writel(irq, ep->msi_mem); + + return 0; +} + +static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num); + + case PCI_EPC_IRQ_MSI: + return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); + + case PCI_EPC_IRQ_MSIX: + return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); + + default: + dev_err(pci->dev, "Unknown IRQ type\n"); + return -EPERM; + } + + return 0; +} + +static const struct pci_epc_features tegra_pcie_epc_features = { + .linkup_notifier = true, + .core_init_notifier = true, + .msi_capable = false, + 
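
The Tegra194 endpoint code treats the PERST# reset GPIO itself as an interrupt source: gpiod_to_irq() converts it, the request uses both edge triggers, and the threaded handler reads the pin level to pick the assert or deassert path. The sketch below shows only that GPIO-to-IRQ plumbing under invented names (example_rst_irq(), the "example-rst" label); it is not the driver's handler.

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

/* Hypothetical threaded handler: the GPIO level tells us which edge fired. */
static irqreturn_t example_rst_irq(int irq, void *arg)
{
        struct gpio_desc *rst = arg;

        if (gpiod_get_value(rst))
                pr_debug("reset asserted\n");
        else
                pr_debug("reset released\n");

        return IRQ_HANDLED;
}

/* Hypothetical setup: convert the GPIO to an IRQ and service it in a
 * thread; IRQF_ONESHOT is required because no hard handler is given. */
static int example_setup_rst_irq(struct device *dev, struct gpio_desc *rst)
{
        int irq;

        irq = gpiod_to_irq(rst);
        if (irq < 0)
                return irq;

        return devm_request_threaded_irq(dev, irq, NULL, example_rst_irq,
                                         IRQF_TRIGGER_RISING |
                                         IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                         "example-rst", rst);
}
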
.msix_capable = false, + .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5, + .bar_fixed_64bit = 1 << BAR_0, + .bar_fixed_size[0] = SZ_1M, +}; + +static const struct pci_epc_features* +tegra_pcie_ep_get_features(struct dw_pcie_ep *ep) +{ + return &tegra_pcie_epc_features; +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .raise_irq = tegra_pcie_ep_raise_irq, + .get_features = tegra_pcie_ep_get_features, +}; + +static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &pcie->pci; + struct device *dev = pcie->dev; + struct dw_pcie_ep *ep; + struct resource *res; + char *name; + int ret; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + ep->page_size = SZ_64K; + + ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME); + if (ret < 0) { + dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n", + ret); + return ret; + } + + ret = gpiod_to_irq(pcie->pex_rst_gpiod); + if (ret < 0) { + dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret); + return ret; + } + pcie->pex_rst_irq = (unsigned int)ret; + + name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq", + pcie->cid); + if (!name) { + dev_err(dev, "Failed to create PERST IRQ string\n"); + return -ENOMEM; + } + + irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN); + + pcie->ep_state = EP_STATE_DISABLED; + + ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL, + tegra_pcie_ep_pex_rst_irq, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + name, (void *)pcie); + if (ret < 0) { + dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret); + return ret; + } + + name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work", + pcie->cid); + if (!name) { + dev_err(dev, "Failed to create PCIe EP work thread string\n"); + return -ENOMEM; + } + + pm_runtime_enable(dev); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n", + ret); + return ret; + } + + return 0; +} + static int tegra_pcie_dw_probe(struct platform_device *pdev) { + const struct tegra_pcie_dw_of_data *data; struct device *dev = &pdev->dev; struct resource *atu_dma_res; struct tegra_pcie_dw *pcie; @@ -1440,6 +2057,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) int ret; u32 i; + data = of_device_get_match_data(dev); + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; @@ -1449,19 +2068,37 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) pci->ops = &tegra_dw_pcie_ops; pp = &pci->pp; pcie->dev = &pdev->dev; + pcie->mode = (enum dw_pcie_device_mode)data->mode; ret = tegra_pcie_dw_parse_dt(pcie); if (ret < 0) { - dev_err(dev, "Failed to parse device tree: %d\n", ret); + const char *level = KERN_ERR; + + if (ret == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, dev, + dev_fmt("Failed to parse device tree: %d\n"), + ret); return ret; } ret = tegra_pcie_get_slot_regulators(pcie); if (ret < 0) { - dev_err(dev, "Failed to get slot regulators: %d\n", ret); + const char *level = KERN_ERR; + + if (ret == -EPROBE_DEFER) + level = KERN_DEBUG; + + dev_printk(level, dev, + dev_fmt("Failed to get slot regulators: %d\n"), + ret); return ret; } + if (pcie->pex_refclk_sel_gpiod) + gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1); + pcie->pex_ctl_supply = 
devm_regulator_get(dev, "vddio-pex-ctl"); if (IS_ERR(pcie->pex_ctl_supply)) { ret = PTR_ERR(pcie->pex_ctl_supply); @@ -1552,17 +2189,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) } pp->irq = platform_get_irq_byname(pdev, "intr"); - if (!pp->irq) { - dev_err(dev, "Failed to get \"intr\" interrupt\n"); - return -ENODEV; - } - - ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler, - IRQF_SHARED, "tegra-pcie-intr", pcie); - if (ret) { - dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret); - return ret; - } + if (pp->irq < 0) + return pp->irq; pcie->bpmp = tegra_bpmp_get(dev); if (IS_ERR(pcie->bpmp)) @@ -1570,11 +2198,43 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pcie); - ret = tegra_pcie_config_rp(pcie); - if (ret && ret != -ENOMEDIUM) - goto fail; - else - return 0; + switch (pcie->mode) { + case DW_PCIE_RC_TYPE: + ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler, + IRQF_SHARED, "tegra-pcie-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, + ret); + goto fail; + } + + ret = tegra_pcie_config_rp(pcie); + if (ret && ret != -ENOMEDIUM) + goto fail; + else + return 0; + break; + + case DW_PCIE_EP_TYPE: + ret = devm_request_threaded_irq(dev, pp->irq, + tegra_pcie_ep_hard_irq, + tegra_pcie_ep_irq_thread, + IRQF_SHARED | IRQF_ONESHOT, + "tegra-pcie-ep-intr", pcie); + if (ret) { + dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, + ret); + goto fail; + } + + ret = tegra_pcie_config_ep(pcie, pdev); + if (ret < 0) + goto fail; + break; + + default: + dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode); + } fail: tegra_bpmp_put(pcie->bpmp); @@ -1593,6 +2253,8 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev) pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); tegra_bpmp_put(pcie->bpmp); + if (pcie->pex_refclk_sel_gpiod) + gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0); return 0; } @@ -1697,9 +2359,22 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev) __deinit_controller(pcie); } +static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = { + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = { + .mode = DW_PCIE_EP_TYPE, +}; + static const struct of_device_id tegra_pcie_dw_of_match[] = { { .compatible = "nvidia,tegra194-pcie", + .data = &tegra_pcie_dw_rc_of_data, + }, + { + .compatible = "nvidia,tegra194-pcie-ep", + .data = &tegra_pcie_dw_ep_of_data, }, {}, }; diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c new file mode 100644 index 000000000000..148355960061 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe endpoint controller driver for UniPhier SoCs + * Copyright 2018 Socionext Inc. 
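
tegra_pcie_dw_probe() now decides between host and endpoint behaviour from the matched compatible string: each of_device_id entry carries a small data structure with the mode, retrieved through of_device_get_match_data(). A stripped-down sketch of the same technique follows; the "vendor,example-pcie" compatibles, example_of_data and example_probe() are invented for illustration.

#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-compatible data: which role the controller plays. */
enum example_mode { EXAMPLE_HOST, EXAMPLE_ENDPOINT };

struct example_of_data {
        enum example_mode mode;
};

static const struct example_of_data example_host_data = {
        .mode = EXAMPLE_HOST,
};

static const struct example_of_data example_ep_data = {
        .mode = EXAMPLE_ENDPOINT,
};

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-pcie", .data = &example_host_data },
        { .compatible = "vendor,example-pcie-ep", .data = &example_ep_data },
        { /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_of_data *data;

        /* The matched of_device_id entry supplies the mode. */
        data = of_device_get_match_data(&pdev->dev);
        if (!data)
                return -EINVAL;

        if (data->mode == EXAMPLE_ENDPOINT)
                dev_info(&pdev->dev, "probing in endpoint mode\n");
        else
                dev_info(&pdev->dev, "probing in host mode\n");

        return 0;
}

static struct platform_driver example_driver = {
        .probe = example_probe,
        .driver = {
                .name = "example-pcie",
                .of_match_table = example_of_match,
        },
};
builtin_platform_driver(example_driver);
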
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> + */ + +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +/* Link Glue registers */ +#define PCL_RSTCTRL0 0x0010 +#define PCL_RSTCTRL_AXI_REG BIT(3) +#define PCL_RSTCTRL_AXI_SLAVE BIT(2) +#define PCL_RSTCTRL_AXI_MASTER BIT(1) +#define PCL_RSTCTRL_PIPE3 BIT(0) + +#define PCL_RSTCTRL1 0x0020 +#define PCL_RSTCTRL_PERST BIT(0) + +#define PCL_RSTCTRL2 0x0024 +#define PCL_RSTCTRL_PHY_RESET BIT(0) + +#define PCL_MODE 0x8000 +#define PCL_MODE_REGEN BIT(8) +#define PCL_MODE_REGVAL BIT(0) + +#define PCL_APP_CLK_CTRL 0x8004 +#define PCL_APP_CLK_REQ BIT(0) + +#define PCL_APP_READY_CTRL 0x8008 +#define PCL_APP_LTSSM_ENABLE BIT(0) + +#define PCL_APP_MSI0 0x8040 +#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8) +#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0) + +#define PCL_APP_MSI1 0x8044 +#define PCL_APP_MSI_REQ BIT(0) + +#define PCL_APP_INTX 0x8074 +#define PCL_APP_INTX_SYS_INT BIT(0) + +/* assertion time of INTx in usec */ +#define PCL_INTX_WIDTH_USEC 30 + +struct uniphier_pcie_ep_priv { + void __iomem *base; + struct dw_pcie pci; + struct clk *clk, *clk_gio; + struct reset_control *rst, *rst_gio; + struct phy *phy; + const struct pci_epc_features *features; +}; + +#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev) + +static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv, + bool enable) +{ + u32 val; + + val = readl(priv->base + PCL_APP_READY_CTRL); + if (enable) + val |= PCL_APP_LTSSM_ENABLE; + else + val &= ~PCL_APP_LTSSM_ENABLE; + writel(val, priv->base + PCL_APP_READY_CTRL); +} + +static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv, + bool assert) +{ + u32 val; + + val = readl(priv->base + PCL_RSTCTRL2); + if (assert) + val |= PCL_RSTCTRL_PHY_RESET; + else + val &= ~PCL_RSTCTRL_PHY_RESET; + writel(val, priv->base + PCL_RSTCTRL2); +} + +static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv) +{ + u32 val; + + /* set EP mode */ + val = readl(priv->base + PCL_MODE); + val |= PCL_MODE_REGEN | PCL_MODE_REGVAL; + writel(val, priv->base + PCL_MODE); + + /* clock request */ + val = readl(priv->base + PCL_APP_CLK_CTRL); + val &= ~PCL_APP_CLK_REQ; + writel(val, priv->base + PCL_APP_CLK_CTRL); + + /* deassert PIPE3 and AXI reset */ + val = readl(priv->base + PCL_RSTCTRL0); + val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE + | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3; + writel(val, priv->base + PCL_RSTCTRL0); + + uniphier_pcie_ltssm_enable(priv, false); + + msleep(100); +} + +static int uniphier_pcie_start_link(struct dw_pcie *pci) +{ + struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); + + uniphier_pcie_ltssm_enable(priv, true); + + return 0; +} + +static void uniphier_pcie_stop_link(struct dw_pcie *pci) +{ + struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); + + uniphier_pcie_ltssm_enable(priv, false); +} + +static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); + u32 val; + + /* + 
* This makes pulse signal to send INTx to the RC, so this should + * be cleared as soon as possible. This sequence is covered with + * mutex in pci_epc_raise_irq(). + */ + /* assert INTx */ + val = readl(priv->base + PCL_APP_INTX); + val |= PCL_APP_INTX_SYS_INT; + writel(val, priv->base + PCL_APP_INTX); + + udelay(PCL_INTX_WIDTH_USEC); + + /* deassert INTx */ + val &= ~PCL_APP_INTX_SYS_INT; + writel(val, priv->base + PCL_APP_INTX); + + return 0; +} + +static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, + u8 func_no, u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); + u32 val; + + val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no) + | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1); + writel(val, priv->base + PCL_APP_MSI0); + + val = readl(priv->base + PCL_APP_MSI1); + val |= PCL_APP_MSI_REQ; + writel(val, priv->base + PCL_APP_MSI1); + + return 0; +} + +static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return uniphier_pcie_ep_raise_legacy_irq(ep); + case PCI_EPC_IRQ_MSI: + return uniphier_pcie_ep_raise_msi_irq(ep, func_no, + interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type); + } + + return 0; +} + +static const struct pci_epc_features* +uniphier_pcie_get_features(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci); + + return priv->features; +} + +static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = { + .ep_init = uniphier_pcie_ep_init, + .raise_irq = uniphier_pcie_ep_raise_irq, + .get_features = uniphier_pcie_get_features, +}; + +static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv, + struct platform_device *pdev) +{ + struct dw_pcie *pci = &priv->pci; + struct dw_pcie_ep *ep = &pci->ep; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; + + ep->ops = &uniphier_pcie_ep_ops; + + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); + if (IS_ERR(pci->dbi_base2)) + return PTR_ERR(pci->dbi_base2); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) + dev_err(dev, "Failed to initialize endpoint (%d)\n", ret); + + return ret; +} + +static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv) +{ + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = clk_prepare_enable(priv->clk_gio); + if (ret) + goto out_clk_disable; + + ret = reset_control_deassert(priv->rst); + if (ret) + goto out_clk_gio_disable; + + ret = reset_control_deassert(priv->rst_gio); + if (ret) + goto out_rst_assert; + + uniphier_pcie_init_ep(priv); + + uniphier_pcie_phy_reset(priv, true); + + ret = phy_init(priv->phy); + if (ret) + goto out_rst_gio_assert; + + uniphier_pcie_phy_reset(priv, false); + + return 0; + +out_rst_gio_assert: + reset_control_assert(priv->rst_gio); +out_rst_assert: + reset_control_assert(priv->rst); +out_clk_gio_disable: + clk_disable_unprepare(priv->clk_gio); +out_clk_disable: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static const struct dw_pcie_ops dw_pcie_ops = { + .start_link = uniphier_pcie_start_link, + .stop_link = uniphier_pcie_stop_link, 
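
The new UniPhier endpoint driver builds the PCL_APP_MSI0 value with FIELD_PREP() over GENMASK()-defined masks instead of open-coded shifts. The fragment below demonstrates that idiom on a made-up register layout (EXAMPLE_TC_MASK / EXAMPLE_VECTOR_MASK); the field positions merely mirror the PCL_APP_MSI0 definitions above.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical register layout, mirroring the PCL_APP_MSI0 style above:
 * a 3-bit traffic-class field and a 5-bit vector field. */
#define EXAMPLE_TC_MASK         GENMASK(10, 8)
#define EXAMPLE_VECTOR_MASK     GENMASK(4, 0)

static u32 example_compose_msi(u8 tc, u8 vector)
{
        /* FIELD_PREP() derives the shift from the mask, so the value lands
         * in the right bit positions without hand-written shift macros. */
        return FIELD_PREP(EXAMPLE_TC_MASK, tc) |
               FIELD_PREP(EXAMPLE_VECTOR_MASK, vector);
}

static u8 example_extract_vector(u32 reg)
{
        /* FIELD_GET() is the matching accessor for reading a field back. */
        return FIELD_GET(EXAMPLE_VECTOR_MASK, reg);
}
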
+}; + +static int uniphier_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_pcie_ep_priv *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->features = of_device_get_match_data(dev); + if (WARN_ON(!priv->features)) + return -EINVAL; + + priv->pci.dev = dev; + priv->pci.ops = &dw_pcie_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(priv->pci.dbi_base)) + return PTR_ERR(priv->pci.dbi_base); + + priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk_gio = devm_clk_get(dev, "gio"); + if (IS_ERR(priv->clk_gio)) + return PTR_ERR(priv->clk_gio); + + priv->rst_gio = devm_reset_control_get_shared(dev, "gio"); + if (IS_ERR(priv->rst_gio)) + return PTR_ERR(priv->rst_gio); + + priv->clk = devm_clk_get(dev, "link"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->rst = devm_reset_control_get_shared(dev, "link"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + + priv->phy = devm_phy_optional_get(dev, "pcie-phy"); + if (IS_ERR(priv->phy)) { + ret = PTR_ERR(priv->phy); + dev_err(dev, "Failed to get phy (%d)\n", ret); + return ret; + } + + platform_set_drvdata(pdev, priv); + + ret = uniphier_pcie_ep_enable(priv); + if (ret) + return ret; + + return uniphier_add_pcie_ep(priv, pdev); +} + +static const struct pci_epc_features uniphier_pro5_data = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, + .align = 1 << 16, + .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4), + .reserved_bar = BIT(BAR_4), +}; + +static const struct of_device_id uniphier_pcie_ep_match[] = { + { + .compatible = "socionext,uniphier-pro5-pcie-ep", + .data = &uniphier_pro5_data, + }, + { /* sentinel */ }, +}; + +static struct platform_driver uniphier_pcie_ep_driver = { + .probe = uniphier_pcie_ep_probe, + .driver = { + .name = "uniphier-pcie-ep", + .of_match_table = uniphier_pcie_ep_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(uniphier_pcie_ep_driver); diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index a5401a0b1e58..3a7f403b57b8 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -416,8 +416,7 @@ static int uniphier_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->pci.dbi_base)) return PTR_ERR(priv->pci.dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link"); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); |
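
Two cleanups recur throughout this diff: platform_get_resource_byname() followed by devm_ioremap_resource() collapses into a single devm_platform_ioremap_resource_byname() call (pcie-intel-gw.c, pcie-kirin.c, pcie-qcom.c, pcie-uniphier.c), and the dev_err() after a failed platform_get_irq() is dropped because the core already logs that failure (pcie-kirin.c, pcie-spear13xx.c, pcie-tegra194.c). A hedged sketch of both, using an invented example_get_resources() helper and a hypothetical "regs" region name:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment: map a named register window and fetch the
 * first interrupt, using the shorter helpers this series converts to. */
static int example_get_resources(struct platform_device *pdev,
                                 void __iomem **regs, int *irq)
{
        /* One call replaces platform_get_resource_byname() +
         * devm_ioremap_resource(). */
        *regs = devm_platform_ioremap_resource_byname(pdev, "regs");
        if (IS_ERR(*regs))
                return PTR_ERR(*regs);

        /* platform_get_irq() prints its own error message on failure,
         * so callers no longer add a dev_err() here. */
        *irq = platform_get_irq(pdev, 0);
        if (*irq < 0)
                return *irq;

        return 0;
}
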