author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2021-02-22 21:35:15 -0800
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2021-02-22 21:35:15 -0800
commit     cbecf716ca618fd44feda6bd9a64a8179d031fc5
tree       186c9f69f0d11f773253c440dac85087f67288b7   /drivers/pci/controller/dwc
parent     Input: st1232 - add IDLE state as ready condition
parent     Input: aiptek - convert sysfs sprintf/snprintf family to sysfs_emit
Merge branch 'next' into for-linus
Prepare input updates for 5.12 merge window.
Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                 |   17
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c            |  153
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c            |  434
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c              |  124
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c          |  203
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c     |  125
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c        |   67
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c             |  213
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c               |  109
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c         |   37
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c          |  122
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c    |  315
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  |  465
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c  |   72
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c       |  261
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h       |  137
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c             |    2
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c            |   78
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c         |  126
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c            |  107
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c             |  171
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c        |   91
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c         |  320
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c      |   38
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c         |   52
25 files changed, 1497 insertions, 2342 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 044a3761c44f..22c5529e9a65 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -83,10 +83,15 @@ config PCIE_DW_PLAT_EP
selected.
config PCI_EXYNOS
- bool "Samsung Exynos PCIe controller"
- depends on SOC_EXYNOS5440 || COMPILE_TEST
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Samsung Exynos SoCs
+ to work in host mode. The PCI controller is based on the DesignWare
+ hardware and therefore the driver re-uses the DesignWare core
+ functions to implement the driver.
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
@@ -107,7 +112,7 @@ config PCI_KEYSTONE
config PCI_KEYSTONE_HOST
bool "PCI Keystone Host Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_KEYSTONE
@@ -119,7 +124,7 @@ config PCI_KEYSTONE_HOST
config PCI_KEYSTONE_EP
bool "PCI Keystone Endpoint Mode"
- depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_KEYSTONE
@@ -169,6 +174,7 @@ config PCIE_QCOM
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ select CRC8
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -237,8 +243,9 @@ config PCIE_HISI_STB
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
- bool "MESON PCIe controller"
+ tristate "MESON PCIe controller"
depends on PCI_MSI_IRQ_DOMAIN
+ default m if ARCH_MESON
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Amlogic
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dc387724cf08..b105af63854a 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -73,8 +73,6 @@
#define LINK_UP BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
-#define EXP_CAP_ID_OFFSET 0x70
-
#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
@@ -91,7 +89,6 @@ struct dra7xx_pcie {
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
- int link_gen;
struct irq_domain *irq_domain;
enum dw_pcie_device_mode mode;
};
@@ -142,33 +139,12 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pci)) {
dev_err(dev, "link is already up\n");
return 0;
}
- if (dra7xx->link_gen == 1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, reg);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, reg);
- }
- }
-
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
@@ -205,11 +181,6 @@ static int dra7xx_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- dw_pcie_setup_rc(pp);
-
- dra7xx_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
return 0;
@@ -401,117 +372,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
return 0;
}
-static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u64 msi_target;
-
- msi_target = (u64)pp->msi_data;
-
- msg->address_lo = lower_32_bits(msi_target);
- msg->address_hi = upper_32_bits(msi_target);
-
- msg->data = d->hwirq;
-
- dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
- (int)d->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int dra7xx_pcie_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask,
- bool force)
-{
- return -EINVAL;
-}
-
-static void dra7xx_pcie_bottom_mask(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pp->lock, flags);
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
- pp->irq_mask[ctrl]);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_unmask(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pp->lock, flags);
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res,
- pp->irq_mask[ctrl]);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
-}
-
-static void dra7xx_pcie_bottom_ack(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- unsigned int res, bit, ctrl;
-
- ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
-
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
-}
-
-static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
- .name = "DRA7XX-PCI-MSI",
- .irq_ack = dra7xx_pcie_bottom_ack,
- .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg,
- .irq_set_affinity = dra7xx_pcie_msi_set_affinity,
- .irq_mask = dra7xx_pcie_bottom_mask,
- .irq_unmask = dra7xx_pcie_bottom_unmask,
-};
-
-static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 ctrl, num_ctrls;
-
- pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
-
- return dw_pcie_allocate_domains(pp);
-}
-
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
.host_init = dra7xx_pcie_host_init,
- .msi_host_init = dra7xx_pcie_msi_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -586,7 +448,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
{
int ret;
struct dw_pcie_ep *ep;
- struct resource *res;
struct device *dev = &pdev->dev;
struct dw_pcie *pci = dra7xx->pci;
@@ -602,13 +463,6 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
@@ -630,6 +484,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
if (pp->irq < 0)
return pp->irq;
+ /* MSI IRQ is muxed */
+ pp->msi_irq = -ENODEV;
+
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
@@ -937,10 +794,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- dra7xx->link_gen = of_pci_get_max_link_speed(np);
- if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
- dra7xx->link_gen = 2;
-
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
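
Note on the pci-dra7xx.c change: the driver no longer carries its own MSI irq_chip or caps the link speed by hand; both are now handled by the DesignWare core, and setting pp->msi_irq to -ENODEV tells the core that the MSI interrupt is muxed into the driver's own IRQ line. The sketch below (not part of the patch; the function name is hypothetical) shows how such a muxed handler hands MSIs back to the core helper:

static irqreturn_t example_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;	/* registered earlier with devm_request_irq() */

	/* let the DWC core scan PCIE_MSI_INTR0_STATUS and dispatch the MSIs */
	return dw_handle_msi_irq(pp);
}
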
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 8d82c43ae299..c24dab383654 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -2,26 +2,23 @@
/*
* PCIe host controller driver for Samsung Exynos SoCs
*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
* https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
+ * Jaehoon Chung <jh80.chung@samsung.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/types.h>
+#include <linux/regulator/consumer.h>
#include "pcie-designware.h"
@@ -37,102 +34,43 @@
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
-#define IRQ_MSI_ENABLE BIT(2)
#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
#define PCIE_CORE_RESET 0x01c
#define PCIE_CORE_RESET_ENABLE BIT(0)
#define PCIE_STICKY_RESET 0x020
#define PCIE_NONSTICKY_RESET 0x024
#define PCIE_APP_INIT_RESET 0x028
#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
#define PCIE_ELBI_LTSSM_ENABLE 0x1
#define PCIE_ELBI_SLV_AWMISC 0x11c
#define PCIE_ELBI_SLV_ARMISC 0x120
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
-struct exynos_pcie_mem_res {
- void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
-};
-
-struct exynos_pcie_clk_res {
- struct clk *clk;
- struct clk *bus_clk;
-};
-
struct exynos_pcie {
- struct dw_pcie *pci;
- struct exynos_pcie_mem_res *mem_res;
- struct exynos_pcie_clk_res *clk_res;
- const struct exynos_pcie_ops *ops;
- int reset_gpio;
-
+ struct dw_pcie pci;
+ void __iomem *elbi_base;
+ struct clk *clk;
+ struct clk *bus_clk;
struct phy *phy;
+ struct regulator_bulk_data supplies[2];
};
-struct exynos_pcie_ops {
- int (*get_mem_resources)(struct platform_device *pdev,
- struct exynos_pcie *ep);
- int (*get_clk_resources)(struct exynos_pcie *ep);
- int (*init_clk_resources)(struct exynos_pcie *ep);
- void (*deinit_clk_resources)(struct exynos_pcie *ep);
-};
-
-static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
- struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
- if (!ep->mem_res)
- return -ENOMEM;
-
- ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ep->mem_res->elbi_base))
- return PTR_ERR(ep->mem_res->elbi_base);
-
- return 0;
-}
-
-static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
- if (!ep->clk_res)
- return -ENOMEM;
-
- ep->clk_res->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk_res->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk_res->clk);
- }
-
- ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->clk_res->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->clk_res->bus_clk);
- }
-
- return 0;
-}
-
-static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
+static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
+ struct device *dev = ep->pci.dev;
int ret;
- ret = clk_prepare_enable(ep->clk_res->clk);
+ ret = clk_prepare_enable(ep->clk);
if (ret) {
dev_err(dev, "cannot enable pcie rc clock");
return ret;
}
- ret = clk_prepare_enable(ep->clk_res->bus_clk);
+ ret = clk_prepare_enable(ep->bus_clk);
if (ret) {
dev_err(dev, "cannot enable pcie bus clock");
goto err_bus_clk;
@@ -141,24 +79,17 @@ static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
return 0;
err_bus_clk:
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->clk);
return ret;
}
-static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
{
- clk_disable_unprepare(ep->clk_res->bus_clk);
- clk_disable_unprepare(ep->clk_res->clk);
+ clk_disable_unprepare(ep->bus_clk);
+ clk_disable_unprepare(ep->clk);
}
-static const struct exynos_pcie_ops exynos5440_pcie_ops = {
- .get_mem_resources = exynos5440_pcie_get_mem_resources,
- .get_clk_resources = exynos5440_pcie_get_clk_resources,
- .init_clk_resources = exynos5440_pcie_init_clk_resources,
- .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources,
-};
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -173,115 +104,71 @@ static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
}
-static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- if (ep->reset_gpio >= 0)
- devm_gpio_request_one(dev, ep->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
-}
-
-static int exynos_pcie_establish_link(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
- return 0;
- }
-
- exynos_pcie_assert_core_reset(ep);
-
- phy_reset(ep->phy);
-
- exynos_pcie_writel(ep->mem_res->elbi_base, 1,
- PCIE_PWR_RESET);
-
- phy_power_on(ep->phy);
- phy_init(ep->phy);
+ struct exynos_pcie *ep = to_exynos_pcie(pci);
+ u32 val;
- exynos_pcie_deassert_core_reset(ep);
- dw_pcie_setup_rc(pp);
- exynos_pcie_assert_reset(ep);
+ val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- phy_power_off(ep->phy);
- return -ETIMEDOUT;
+ return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
-}
-
-static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
-{
- u32 val;
-
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -292,26 +179,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void exynos_pcie_msi_init(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val;
-
- dw_pcie_msi_init(pp);
-
- /* enable MSI interrupt */
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
- val |= IRQ_MSI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
-}
-
-static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
- exynos_pcie_enable_irq_pulse(ep);
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(ep);
+ exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -336,42 +211,43 @@ static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_r_mode(ep, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_w_mode(ep, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
+
static int exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val;
+ u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_ELBI_XMLH_LINKUP);
}
static int exynos_pcie_host_init(struct pcie_port *pp)
@@ -379,44 +255,45 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
- exynos_pcie_establish_link(ep);
- exynos_pcie_enable_interrupts(ep);
+ pp->bridge->ops = &exynos_pci_ops;
+
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_reset(ep->phy);
+ phy_power_on(ep->phy);
+ phy_init(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
return 0;
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .rd_own_conf = exynos_pcie_rd_own_conf,
- .wr_own_conf = exynos_pcie_wr_own_conf,
.host_init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
- struct dw_pcie *pci = ep->pci;
+ struct dw_pcie *pci = &ep->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
- pp->irq = platform_get_irq(pdev, 1);
+ pp->irq = platform_get_irq(pdev, 0);
if (pp->irq < 0)
return pp->irq;
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", ep);
+ IRQF_SHARED, "exynos-pcie", ep);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -431,12 +308,12 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = exynos_pcie_read_dbi,
.write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
};
-static int __init exynos_pcie_probe(struct platform_device *pdev)
+static int exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
struct exynos_pcie *ep;
struct device_node *np = dev->of_node;
int ret;
@@ -445,43 +322,45 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
if (!ep)
return -ENOMEM;
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- ep->pci = pci;
- ep->ops = (const struct exynos_pcie_ops *)
- of_device_get_match_data(dev);
-
- ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
ep->phy = devm_of_phy_get(dev, np, NULL);
- if (IS_ERR(ep->phy)) {
- if (PTR_ERR(ep->phy) != -ENODEV)
- return PTR_ERR(ep->phy);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
- ep->phy = NULL;
- }
+ /* External Local Bus interface (ELBI) registers */
+ ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(ep->elbi_base))
+ return PTR_ERR(ep->elbi_base);
- if (ep->ops && ep->ops->get_mem_resources) {
- ret = ep->ops->get_mem_resources(pdev, ep);
- if (ret)
- return ret;
+ ep->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ep->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ep->clk);
}
- if (ep->ops && ep->ops->get_clk_resources &&
- ep->ops->init_clk_resources) {
- ret = ep->ops->get_clk_resources(ep);
- if (ret)
- return ret;
- ret = ep->ops->init_clk_resources(ep);
- if (ret)
- return ret;
+ ep->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(ep->bus_clk)) {
+ dev_err(dev, "Failed to get pcie bus clock\n");
+ return PTR_ERR(ep->bus_clk);
}
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
+
+ ret = exynos_pcie_init_clk_resources(ep);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, ep);
ret = exynos_add_pcie_port(ep, pdev);
@@ -492,9 +371,9 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
return ret;
}
@@ -502,32 +381,65 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ exynos_pcie_deinit_clk_resources(ep);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return 0;
}
+static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct pcie_port *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
static const struct of_device_id exynos_pcie_of_match[] = {
- {
- .compatible = "samsung,exynos5440-pcie",
- .data = &exynos5440_pcie_ops
- },
- {},
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
};
static struct platform_driver exynos_pcie_driver = {
+ .probe = exynos_pcie_probe,
.remove = __exit_p(exynos_pcie_remove),
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
},
};
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init exynos_pcie_init(void)
-{
- return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-}
-subsys_initcall(exynos_pcie_init);
+module_platform_driver(exynos_pcie_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
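
Note on the pci-exynos.c rewrite: struct dw_pcie is now embedded in struct exynos_pcie rather than allocated separately, root-bus config accesses go through dw_pcie_read_dbi()/dw_pcie_write_dbi() (so the ELBI sideband AW/ARMISC toggling lives in one place), and the exynos5440 ops abstraction is dropped in favour of exynos5433 support. The to_exynos_pcie() helper used throughout these hunks sits outside the visible context; a minimal sketch, assuming the usual container_of() pattern for an embedded dw_pcie:

#define to_exynos_pcie(x)	container_of((x), struct exynos_pcie, pci)
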
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5fef2613b223..0cf1333c0440 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -79,7 +79,6 @@ struct imx6_pcie {
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
- int link_gen;
struct regulator *vpcie;
void __iomem *phy_base;
@@ -94,15 +93,6 @@ struct imx6_pcie {
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
-/* PCIe Root Complex registers (memory-mapped) */
-#define PCIE_RC_IMX6_MSI_CAP 0x50
-#define PCIE_RC_LCR 0x7c
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
-
-#define PCIE_RC_LCSR 0x80
-
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
@@ -116,8 +106,6 @@ struct imx6_pcie {
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK BIT(16)
-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
#define PCIE_PHY_ATEOVRD_EN BIT(2)
@@ -757,10 +745,11 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
}
}
-static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+static int imx6_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
@@ -769,10 +758,10 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
@@ -781,12 +770,12 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
if (ret)
goto err_reset_phy;
- if (imx6_pcie->link_gen == 2) {
+ if (pci->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
* Start Directed Speed Change so the best possible
@@ -824,8 +813,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dev_info(dev, "Link: Gen2 disabled\n");
}
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
- dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+ tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -845,11 +834,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
imx6_setup_phy_mpll(imx6_pcie);
- dw_pcie_setup_rc(pp);
- imx6_pcie_establish_link(imx6_pcie);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
return 0;
}
@@ -858,33 +842,8 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &imx6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
- /* No special ops needed, but pcie-designware still expects this struct */
+ .start_link = imx6_pcie_start_link,
};
#ifdef CONFIG_PM_SLEEP
@@ -993,7 +952,7 @@ static int imx6_pcie_resume_noirq(struct device *dev)
imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
- ret = imx6_pcie_establish_link(imx6_pcie);
+ ret = imx6_pcie_start_link(imx6_pcie->pci);
if (ret < 0)
dev_info(dev, "pcie link is down after resume.\n");
@@ -1027,6 +986,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &imx6_pcie_host_ops;
imx6_pcie->pci = pci;
imx6_pcie->drvdata = of_device_get_match_data(dev);
@@ -1073,38 +1033,33 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(dev, "pcie_phy clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_phy);
- }
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(dev, "pcie_bus clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_bus);
- }
+ if (IS_ERR(imx6_pcie->pcie_bus))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
+ "pcie_bus clock source missing or invalid\n");
imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(dev, "pcie clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie);
- }
+ if (IS_ERR(imx6_pcie->pcie))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
+ "pcie clock source missing or invalid\n");
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_inbound_axi);
- }
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
+ "pcie_inbound_axi clock missing or invalid\n");
break;
case IMX8MQ:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux)) {
- dev_err(dev, "pcie_aux clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_aux);
- }
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
fallthrough;
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
@@ -1165,10 +1120,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(node, "fsl,max-link-speed",
- &imx6_pcie->link_gen);
- if (ret)
- imx6_pcie->link_gen = 1;
+ pci->link_gen = 1;
+ ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx6_pcie->vpcie)) {
@@ -1183,16 +1136,15 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx6_add_pcie_port(imx6_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
if (pci_msi_enabled()) {
- val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
- PCI_MSI_FLAGS);
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
- val);
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
}
return 0;
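
Note on the pci-imx6.c change: clock lookup failures are now reported with dev_err_probe(), which stays quiet on -EPROBE_DEFER (recording the reason in debugfs) and logs only real errors, while still returning the error code to the caller. A sketch of the pattern (not from the patch; the wrapper function is hypothetical, the "pcie_phy" clock name matches the driver):

static int example_get_phy_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "pcie_phy clock source missing or invalid\n");

	return 0;
}
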
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c9d6a75f17..53aa35cb3a49 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -96,8 +96,6 @@
#define LEG_EP 0x1
#define RC 0x2
-#define EXP_CAP_ID_OFFSET 0x70
-
#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
#define AM654_PCIE_DEV_TYPE_MASK 0x3
@@ -275,14 +273,6 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
}
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
-{
- return 0;
-}
-
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
@@ -400,10 +390,14 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
- u64 start = pp->mem->start;
- u64 end = pp->mem->end;
+ u64 start, end;
+ struct resource *mem;
int i;
+ mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ start = mem->start;
+ end = mem->end;
+
/* Disable BARs for inbound access */
ks_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -430,10 +424,10 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
-static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
@@ -444,36 +438,29 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
reg |= CFG_TYPE1;
ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
- return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+ return pp->va_cfg0_base + where;
}
-static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 reg;
-
- reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
- CFG_FUNC(PCI_FUNC(devfn));
- if (!pci_is_root_bus(bus->parent))
- reg |= CFG_TYPE1;
- ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
-
- return dw_pcie_write(pp->va_cfg0_base + where, size, val);
-}
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
/**
- * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
-static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ if (!pci_is_root_bus(bus))
+ return 0;
+
/* Configure and set up BAR0 */
ks_pcie_set_dbi_mode(ks_pcie);
@@ -488,8 +475,17 @@ static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
* be sufficient. Use physical address to avoid any conflicts.
*/
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ return 0;
}
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = ks_pcie_v3_65_add_bus,
+};
+
/**
* ks_pcie_link_up() - Check if link up
*/
@@ -516,14 +512,8 @@ static void ks_pcie_stop_link(struct dw_pcie *pci)
static int ks_pcie_start_link(struct dw_pcie *pci)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- struct device *dev = pci->dev;
u32 val;
- if (dw_pcie_link_up(pci)) {
- dev_dbg(dev, "link is already up\n");
- return 0;
- }
-
/* Initiate Link Training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
@@ -807,6 +797,9 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ pp->bridge->ops = &ks_pcie_ops;
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
return ret;
@@ -815,8 +808,6 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
- dw_pcie_setup_rc(pp);
-
ks_pcie_stop_link(pci);
ks_pcie_setup_rc_app_regs(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
@@ -835,23 +826,16 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
"Asynchronous external abort");
#endif
- ks_pcie_start_link(pci);
- dw_pcie_wait_for_link(pci);
-
return 0;
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .rd_other_conf = ks_pcie_rd_other_conf,
- .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
.msi_host_init = ks_pcie_msi_host_init,
- .scan_bus = ks_pcie_v3_65_scan_bus,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
.host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_am654_msi_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -861,43 +845,6 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int ret;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
-
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
- u32 reg, size_t size)
-{
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
-
- ks_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_read(base + reg, size, &val);
- ks_pcie_clear_dbi_mode(ks_pcie);
- return val;
-}
-
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
@@ -912,7 +859,6 @@ static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
.start_link = ks_pcie_start_link,
.stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
- .read_dbi2 = ks_pcie_am654_read_dbi2,
.write_dbi2 = ks_pcie_am654_write_dbi2,
};
@@ -995,33 +941,6 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
.get_features = &ks_pcie_am654_get_features,
};
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = ks_pcie->pci;
-
- ep = &pci->ep;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
int num_lanes = ks_pcie->num_lanes;
@@ -1125,31 +1044,6 @@ static int ks_pcie_am654_set_mode(struct device *dev,
return 0;
}
-static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
-{
- u32 val;
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
- val);
- }
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
- val);
- }
-
- dw_pcie_dbi_ro_wr_dis(pci);
-}
-
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
.version = 0x365A,
@@ -1197,13 +1091,11 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct keystone_pcie *ks_pcie;
struct device_link **link;
struct gpio_desc *gpiod;
- void __iomem *atu_base;
struct resource *res;
unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
- int link_speed;
u32 num_lanes;
char name[10];
int ret;
@@ -1320,29 +1212,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A) {
- atu_base = devm_platform_ioremap_resource_byname(pdev, "atu");
- if (IS_ERR(atu_base)) {
- ret = PTR_ERR(atu_base);
- goto err_get_sync;
- }
-
- pci->atu_base = atu_base;
-
+ if (pci->version >= 0x480A)
ret = ks_pcie_am654_set_mode(dev, mode);
- if (ret < 0)
- goto err_get_sync;
- } else {
+ else
ret = ks_pcie_set_mode(dev);
- if (ret < 0)
- goto err_get_sync;
- }
-
- link_speed = of_pci_get_max_link_speed(np);
- if (link_speed < 0)
- link_speed = 2;
-
- ks_pcie_set_link_speed(pci, link_speed);
+ if (ret < 0)
+ goto err_get_sync;
switch (mode) {
case DW_PCIE_RC_TYPE:
@@ -1372,7 +1247,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->num_viewport = num_viewport;
pci->pp.ops = host_ops;
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
goto err_get_sync;
break;
@@ -1383,7 +1258,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
}
pci->ep.ops = ep_ops;
- ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+ ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
break;
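
Note on the pci-keystone.c change: the driver-specific rd_other_conf()/wr_other_conf() host ops are replaced by standard struct pci_ops with a .map_bus hook plus the generic config accessors, and the scan_bus hook becomes an add_bus callback. The map_bus contract is: program whatever routing is needed for the target (bus, devfn), then return a pointer into the mapped config window (or NULL to fail the access). A minimal sketch, with the routing helper left hypothetical:

static void __iomem *example_map_bus(struct pci_bus *bus,
				     unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;

	/* hypothetical helper: select the target bus/device/function */
	example_select_target(pp, bus->number, devfn);

	return pp->va_cfg0_base + where;
}

static struct pci_ops example_child_pcie_ops = {
	.map_bus = example_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};
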
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 0d151cead1b7..4d12efdacd2f 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -18,52 +18,58 @@
#include "pcie-designware.h"
-#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
-struct ls_pcie_ep {
- struct dw_pcie *pci;
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
};
-#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+};
static int ls_pcie_establish_link(struct dw_pcie *pci)
{
return 0;
}
-static const struct dw_pcie_ops ls_pcie_ep_ops = {
+static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
.start_link = ls_pcie_establish_link,
};
-static const struct of_device_id ls_pcie_ep_of_match[] = {
- { .compatible = "fsl,ls-pcie-ep",},
- { },
-};
-
-static const struct pci_epc_features ls_pcie_epc_features = {
- .linkup_notifier = false,
- .msi_capable = true,
- .msix_capable = false,
- .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &ls_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
}
static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
enum pci_barno bar;
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -73,54 +79,56 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
}
}
-static const struct dw_pcie_ep_ops pcie_ep_ops = {
+static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
+ .func_conf_select = ls_pcie_ep_func_conf_select,
};
-static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- struct dw_pcie_ep *ep;
- struct resource *res;
- int ret;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
- return 0;
-}
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { },
+};
static int __init ls_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -130,21 +138,30 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
+ ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
- pci->dev = dev;
- pci->ops = &ls_pcie_ep_ops;
- pcie->pci = pci;
+ pci->ep.ops = &ls_pcie_ep_ops;
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_ep(pcie, pdev);
-
- return ret;
+ return dw_pcie_ep_init(&pci->ep);
}
static struct platform_driver ls_pcie_ep_driver = {
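
Note on the pci-layerscape-ep.c change: the driver now publishes a func_conf_select() endpoint op so that SoCs whose physical functions sit at a fixed DBI stride (0x20000 on the ls1088a/ls2088a parts) can be addressed per function by the DesignWare endpoint core. A simplified sketch of how the core is expected to consume the hook (not part of the patch):

static void example_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
				 u32 reg, size_t size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	unsigned int func_offset = 0;

	if (ep->ops->func_conf_select)
		func_offset = ep->ops->func_conf_select(ep, func_no);

	dw_pcie_write_dbi(pci, func_offset + reg, size, val);
}
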
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index f24f79a70d9a..44ad34cdc3bc 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -83,14 +83,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
-{
- int i;
-
- for (i = 0; i < PCIE_IATU_NUM; i++)
- dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
-}
-
static int ls1021_pcie_link_up(struct dw_pcie *pci)
{
u32 state;
@@ -136,12 +128,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- /*
- * Disable outbound windows configured by the bootloader to avoid
- * one transaction hitting multiple outbound windows.
- * dw_pcie_setup_rc() will reconfigure the outbound windows.
- */
- ls_pcie_disable_outbound_atus(pcie);
ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
@@ -150,8 +136,6 @@ static int ls_pcie_host_init(struct pcie_port *pp)
ls_pcie_drop_msg_tlp(pcie);
- dw_pcie_setup_rc(pp);
-
return 0;
}
@@ -182,37 +166,12 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ls_pcie_host_init(pp);
}
-static int ls_pcie_msi_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct device_node *msi_node;
-
- /*
- * The MSI domain is set by the generic of_msi_configure(). This
- * .msi_host_init() function keeps us from doing the default MSI
- * domain setup in dw_pcie_host_init() and also enforces the
- * requirement that "msi-parent" exists.
- */
- msi_node = of_parse_phandle(np, "msi-parent", 0);
- if (!msi_node) {
- dev_err(dev, "failed to find msi-parent\n");
- return -EINVAL;
- }
-
- of_node_put(msi_node);
- return 0;
-}
-
static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
.host_init = ls1021_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
};
static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
@@ -273,31 +232,12 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_add_pcie_port(struct ls_pcie *pcie)
-{
- struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- pp->ops = pcie->drvdata->ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static int __init ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -311,6 +251,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
+ pci->pp.ops = pcie->drvdata->ops;
pcie->pci = pci;
@@ -326,11 +267,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_port(pcie);
- if (ret < 0)
- return ret;
-
- return 0;
+ return dw_pcie_host_init(&pci->pp);
}
static struct platform_driver ls_pcie_driver = {
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 4f183b96afbb..686ded034f22 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -17,37 +17,13 @@
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/phy/phy.h>
+#include <linux/module.h>
#include "pcie-designware.h"
#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
-/* External local bus interface registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
-#define FAST_LINK_MODE BIT(7)
-#define LINK_CAPABLE_MASK GENMASK(21, 16)
-#define LINK_CAPABLE_X1 BIT(16)
-
-#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
-#define NUM_OF_LANES_MASK GENMASK(12, 8)
-#define NUM_OF_LANES_X1 BIT(8)
-#define DIRECT_SPEED_CHANGE BIT(17)
-
-#define TYPE1_HDR_OFFSET 0x0
-#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
-#define PCI_IO_EN BIT(0)
-#define PCI_MEM_SPACE_EN BIT(1)
-#define PCI_BUS_MASTER_EN BIT(2)
-
-#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
-#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
-
-#define PCIE_CAP_OFFSET 0x70
-#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
-#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
-#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
/* PCIe specific config registers */
@@ -77,11 +53,6 @@ enum pcie_data_rate {
PCIE_GEN4
};
-struct meson_pcie_mem_res {
- void __iomem *elbi_base;
- void __iomem *cfg_base;
-};
-
struct meson_pcie_clk_res {
struct clk *clk;
struct clk *port_clk;
@@ -95,7 +66,7 @@ struct meson_pcie_rc_reset {
struct meson_pcie {
struct dw_pcie pci;
- struct meson_pcie_mem_res mem_res;
+ void __iomem *cfg_base;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
@@ -134,28 +105,18 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
return 0;
}
-static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
-
- return devm_ioremap_resource(dev, res);
-}
-
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
- mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
- if (IS_ERR(mp->mem_res.elbi_base))
- return PTR_ERR(mp->mem_res.elbi_base);
+ struct dw_pcie *pci = &mp->pci;
- mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
- if (IS_ERR(mp->mem_res.cfg_base))
- return PTR_ERR(mp->mem_res.cfg_base);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
return 0;
}
@@ -253,24 +214,14 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
return 0;
}
-static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
-{
- writel(val, mp->mem_res.elbi_base + reg);
-}
-
-static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
-{
- return readl(mp->mem_res.elbi_base + reg);
-}
-
static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
- return readl(mp->mem_res.cfg_base + reg);
+ return readl(mp->cfg_base + reg);
}
static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
- writel(val, mp->mem_res.cfg_base + reg);
+ writel(val, mp->cfg_base + reg);
}
static void meson_pcie_assert_reset(struct meson_pcie *mp)
@@ -280,32 +231,13 @@ static void meson_pcie_assert_reset(struct meson_pcie *mp)
gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
-static void meson_pcie_init_dw(struct meson_pcie *mp)
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
{
u32 val;
val = meson_cfg_readl(mp, PCIE_CFG0);
val |= APP_LTSSM_ENABLE;
meson_cfg_writel(mp, val, PCIE_CFG0);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val &= ~NUM_OF_LANES_MASK;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}
static int meson_size_to_payload(struct meson_pcie *mp, int size)
@@ -327,69 +259,52 @@ static int meson_size_to_payload(struct meson_pcie *mp, int size)
static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_payload_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_rd_req_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
-static inline void meson_enable_memory_space(struct meson_pcie *mp)
+static int meson_pcie_start_link(struct dw_pcie *pci)
{
- /* Set the RC Bus Master, Memory Space and I/O Space enables */
- meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
- PCIE_STATUS_COMMAND);
-}
-
-static int meson_pcie_establish_link(struct meson_pcie *mp)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
-
- meson_pcie_init_dw(mp);
- meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
- meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
-
- dw_pcie_setup_rc(pp);
- meson_enable_memory_space(mp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ meson_pcie_ltssm_enable(mp);
meson_pcie_assert_reset(mp);
- return dw_pcie_wait_for_link(pci);
-}
-
-static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(&mp->pci.pp);
+ return 0;
}
-static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret;
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -410,13 +325,11 @@ static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return PCIBIOS_SUCCESSFUL;
}
-static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
static int meson_pcie_link_up(struct dw_pcie *pci)
{
@@ -461,51 +374,22 @@ static int meson_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
- int ret;
- ret = meson_pcie_establish_link(mp);
- if (ret)
- return ret;
+ pp->bridge->ops = &meson_pci_ops;
- meson_pcie_enable_interrupts(mp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
return 0;
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .rd_own_conf = meson_pcie_rd_own_conf,
- .wr_own_conf = meson_pcie_wr_own_conf,
.host_init = meson_pcie_host_init,
};
-static int meson_add_pcie_port(struct meson_pcie *mp,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &meson_pcie_host_ops;
- pci->dbi_base = mp->mem_res.elbi_base;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
};
static int meson_pcie_probe(struct platform_device *pdev)
@@ -522,6 +406,8 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
+ pci->num_lanes = 1;
mp->phy = devm_phy_get(dev, "pcie");
if (IS_ERR(mp->phy)) {
@@ -567,7 +453,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mp);
- ret = meson_add_pcie_port(mp, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
goto err_phy;
@@ -589,6 +475,7 @@ static const struct of_device_id meson_pcie_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
static struct platform_driver meson_pcie_driver = {
.probe = meson_pcie_probe,
@@ -598,4 +485,8 @@ static struct platform_driver meson_pcie_driver = {
},
};
-builtin_platform_driver(meson_pcie_driver);
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
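Editorial aside: the meson conversion also illustrates replacing hard-coded DesignWare register offsets with capability lookups — the Device Control fields are now reached through dw_pcie_find_capability(PCI_CAP_ID_EXP) and the generic PCI_EXP_DEVCTL_* masks. A condensed sketch of that read-modify-write idiom (function and parameter names are illustrative; the patch above performs the clear and the set as two separate passes):

#include <linux/pci.h>

#include "pcie-designware.h"

/* Sketch: program Max_Payload_Size via the PCIe capability, the way
 * meson_set_max_payload() now does through the DBI helpers. */
static void sketch_set_max_payload(struct dw_pcie *pci, int encoded_size)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_PAYLOAD;
	val |= encoded_size << 5;	/* same shift as PCIE_CAP_MAX_PAYLOAD_SIZE(x) */
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}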
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index d57d4ee15848..abf37aa68e51 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -76,7 +76,6 @@ static int al_pcie_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops al_pcie_ops = {
- .bus_shift = 20,
.init = al_pcie_init,
.pci_ops = {
.map_bus = al_pcie_map_bus,
@@ -138,8 +137,6 @@ struct al_pcie {
struct al_pcie_target_bus_cfg target_bus_cfg;
};
-#define PCIE_ECAM_DEVFN(x) (((x) & 0xff) << 12)
-
#define to_al_pcie(x) dev_get_drvdata((x)->dev)
static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
@@ -217,19 +214,15 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
reg);
}
-static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
- unsigned int busnr,
- unsigned int devfn)
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
- struct pcie_port *pp = &pcie->pci->pp;
- void __iomem *pci_base_addr;
-
- pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
- (busnr_ecam << 20) +
- PCIE_ECAM_DEVFN(devfn));
if (busnr_reg != target_bus_cfg->reg_val) {
dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
@@ -240,52 +233,14 @@ static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
target_bus_cfg->reg_mask);
}
- return pci_base_addr;
-}
-
-static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_read(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), *val);
-
- return rc;
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
}
-static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_write(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), val);
-
- return rc;
-}
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
@@ -297,10 +252,11 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
u8 secondary_bus;
u32 cfg_control;
u32 reg;
+ struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
target_bus_cfg = &pcie->target_bus_cfg;
- ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
if (ecam_bus_mask > 255) {
dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
ecam_bus_mask = 255;
@@ -310,13 +266,13 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
target_bus_cfg->ecam_mask = ecam_bus_mask;
/* This portion is taken from the cfg_target_bus reg */
target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
- target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
target_bus_cfg->reg_mask);
- secondary_bus = pp->busn->start + 1;
- subordinate_bus = pp->busn->end;
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
/* Set the valid values of secondary and subordinate buses */
cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
@@ -339,6 +295,8 @@ static int al_pcie_host_init(struct pcie_port *pp)
struct al_pcie *pcie = to_al_pcie(pci);
int rc;
+ pp->bridge->child_ops = &al_child_pci_ops;
+
rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
if (rc)
return rc;
@@ -353,28 +311,9 @@ static int al_pcie_host_init(struct pcie_port *pp)
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .rd_other_conf = al_pcie_rd_other_conf,
- .wr_other_conf = al_pcie_wr_other_conf,
.host_init = al_pcie_host_init,
};
-static int al_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &al_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
};
@@ -383,7 +322,6 @@ static int al_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *controller_res;
struct resource *ecam_res;
- struct resource *dbi_res;
struct al_pcie *al_pcie;
struct dw_pcie *pci;
@@ -397,15 +335,11 @@ static int al_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &al_pcie_host_ops;
al_pcie->pci = pci;
al_pcie->dev = dev;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (!ecam_res) {
dev_err(dev, "couldn't find 'config' reg in DT\n");
@@ -422,12 +356,11 @@ static int al_pcie_probe(struct platform_device *pdev)
return PTR_ERR(al_pcie->controller_base);
}
- dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
- dbi_res, controller_res);
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
platform_set_drvdata(pdev, al_pcie);
- return al_add_pcie_port(&pci->pp, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
static const struct of_device_id al_pcie_of_match[] = {
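Editorial aside: the al driver shows the new child-bus model in its simplest form — the driver supplies only a .map_bus callback and leaves the accesses to pci_generic_config_read()/pci_generic_config_write(). A stripped-down sketch of such an ops table, assuming the config window is already mapped at pp->va_cfg0_base ("sketch_" names are illustrative):

#include <linux/pci.h>
#include <linux/pci-ecam.h>

#include "pcie-designware.h"

/* Translate (bus, devfn, where) into an offset inside the mapped ECAM-style
 * config window, as al_pcie_conf_addr_map_bus() does above (minus the
 * target-bus reprogramming). */
static void __iomem *sketch_map_bus(struct pci_bus *bus, unsigned int devfn,
				    int where)
{
	struct pcie_port *pp = bus->sysdata;

	return pp->va_cfg0_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

static struct pci_ops sketch_child_pci_ops = {
	.map_bus = sketch_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};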
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 13901f359a41..4e2552dcf982 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -154,10 +154,22 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+ return 0;
+}
+
+static int armada8k_pcie_host_init(struct pcie_port *pp)
{
- struct dw_pcie *pci = pcie->pci;
u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (!dw_pcie_link_up(pci)) {
/* Disable LTSSM state machine to enable configuration */
@@ -193,26 +205,6 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
- if (!dw_pcie_link_up(pci)) {
- /* Configuration done. Start LTSSM */
- reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
- reg |= PCIE_APP_LTSSM_EN;
- dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
- }
-
- /* Wait until the link becomes active again */
- if (dw_pcie_wait_for_link(pci))
- dev_err(pci->dev, "Link not up after reconfiguration\n");
-}
-
-static int armada8k_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
-
- dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pcie);
-
return 0;
}
@@ -269,6 +261,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
};
static int armada8k_pcie_probe(struct platform_device *pdev)
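Editorial aside: the armada8k change captures the new division of labour around link bring-up — drivers no longer call dw_pcie_wait_for_link() from host_init; they implement dw_pcie_ops.start_link() and let dw_pcie_host_init() start and wait for the link. A self-contained sketch of that callback; the SKETCH_* register layout is invented for illustration (real drivers use their own definitions, e.g. PCIE_GLOBAL_CONTROL_REG / PCIE_APP_LTSSM_EN above):

#include <linux/bits.h>

#include "pcie-designware.h"

#define SKETCH_CTRL_REG		0x8000		/* placeholder offset */
#define SKETCH_LTSSM_EN		BIT(0)		/* placeholder bit */

static int sketch_start_link(struct dw_pcie *pci)
{
	u32 reg;

	/* Kick the LTSSM and return; the core waits for the link to come up. */
	reg = dw_pcie_readl_dbi(pci, SKETCH_CTRL_REG);
	reg |= SKETCH_LTSSM_EN;
	dw_pcie_writel_dbi(pci, SKETCH_CTRL_REG, reg);

	return 0;
}

static const struct dw_pcie_ops sketch_dw_pcie_ops = {
	.start_link = sketch_start_link,
};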
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 97d50bb50f06..597c282f586c 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -44,13 +44,6 @@ struct artpec_pcie_of_data {
static const struct of_device_id artpec6_pcie_of_match[];
-/* PCIe Port Logic registers (memory-mapped) */
-#define PL_OFFSET 0x700
-
-#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
-#define ACK_N_FTS_MASK GENMASK(15, 8)
-#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
-
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN BIT(24)
@@ -289,30 +282,6 @@ static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
}
}
-static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- u32 val;
-
- if (artpec6_pcie->variant != ARTPEC7)
- return;
-
- /*
- * Increase the N_FTS (Number of Fast Training Sequences)
- * to be transmitted when transitioning from L0s to L0.
- */
- val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
- val &= ~ACK_N_FTS_MASK;
- val |= ACK_N_FTS(180);
- dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
-
- /*
- * Set the Number of Fast Training Sequences that the core
- * advertises as its N_FTS during Gen2 or Gen3 link training.
- */
- dw_pcie_link_set_n_fts(pci, 180);
-}
-
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
@@ -346,29 +315,19 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-}
-
static int artpec6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
- dw_pcie_setup_rc(pp);
- artpec6_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- artpec6_pcie_enable_interrupts(artpec6_pcie);
return 0;
}
@@ -377,31 +336,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
-static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- pp->ops = &artpec6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -412,7 +346,6 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
@@ -441,38 +374,6 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.raise_irq = artpec6_pcie_raise_irq,
};
-static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = artpec6_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
-}
-
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -507,10 +408,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
artpec6_pcie->variant = variant;
artpec6_pcie->mode = mode;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
artpec6_pcie->phy_base =
devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(artpec6_pcie->phy_base))
@@ -529,7 +426,9 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
return -ENODEV;
- ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
break;
@@ -542,9 +441,10 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_DEVICE_TYPE_MASK;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
- if (ret < 0)
- return ret;
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ return dw_pcie_ep_init(&pci->ep);
break;
}
default:
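Editorial aside: on the artpec6 side, N_FTS programming moves out of the driver — instead of writing ACK_F_ASPM_CTRL_OFF and calling dw_pcie_link_set_n_fts(), the driver records its preference in pci->n_fts[] and the core applies it from dw_pcie_setup(). A minimal host_init fragment showing that, filling both array slots the same way the ARTPEC-7 hunk above does (which entry maps to which speed range is a core implementation detail):

#include "pcie-designware.h"

static int sketch_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Ask for 180 fast training sequences; dw_pcie_setup() programs it. */
	pci->n_fts[0] = 180;
	pci->n_fts[1] = 180;

	return 0;
}

static const struct dw_pcie_host_ops sketch_host_ops = {
	.host_init = sketch_host_init,
};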
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 305bfec2424d..bcd1cd9ba8c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -7,11 +7,14 @@
*/
#include <linux/of.h>
+#include <linux/platform_device.h>
#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -28,12 +31,39 @@ void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
- int flags)
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie_ep_func *ep_func;
+
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
+}
+
+static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
+{
+ unsigned int func_offset = 0;
+
+ if (ep->ops->func_conf_select)
+ func_offset = ep->ops->func_conf_select(ep, func_no);
+
+ return func_offset;
+}
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
{
u32 reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
@@ -46,7 +76,53 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
- __dw_pcie_ep_reset_bar(pci, bar, 0);
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+}
+
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 next_cap_ptr;
+ u16 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
@@ -54,41 +130,44 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
- dma_addr_t cpu_addr,
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_barno bar, dma_addr_t cpu_addr,
enum dw_pcie_as_type as_type)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
- if (free_win >= ep->num_ib_windows) {
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
as_type);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
@@ -101,20 +180,21 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
- if (free_win >= ep->num_ob_windows) {
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
- dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -130,7 +210,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
enum pci_barno bar = epf_bar->barno;
u32 atu_index = ep->bar_to_atu[bar];
- __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
@@ -147,14 +227,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
size_t size = epf_bar->size;
int flags = epf_bar->flags;
enum dw_pcie_as_type as_type;
- u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ u32 reg;
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
as_type = DW_PCIE_AS_MEM;
else
as_type = DW_PCIE_AS_IO;
- ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
+ epf_bar->phys_addr, as_type);
if (ret)
return ret;
@@ -178,8 +264,9 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < ep->num_ob_windows; index++) {
+ for (index = 0; index < pci->num_ob_windows; index++) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -213,7 +300,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -227,11 +314,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
@@ -246,11 +338,16 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
@@ -266,11 +363,16 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
@@ -286,23 +388,28 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
dw_pcie_dbi_ro_wr_en(pci);
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
dw_pcie_writew_dbi(pci, reg, val);
- reg = ep->msix_cap + PCI_MSIX_TABLE;
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
val = offset | bir;
dw_pcie_writel_dbi(pci, reg, val);
- reg = ep->msix_cap + PCI_MSIX_PBA;
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
dw_pcie_writel_dbi(pci, reg, val);
@@ -385,31 +492,36 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
unsigned int aligned_offset;
+ unsigned int func_offset = 0;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
- reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep->msi_cap + PCI_MSI_DATA_64;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
- reg = ep->msi_cap + PCI_MSI_DATA_32;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
@@ -427,12 +539,33 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
+
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
+
+ return 0;
+}
+
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
- u16 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
+ unsigned int func_offset = 0;
u32 reg, msg_data, vec_ctrl;
unsigned int aligned_offset;
u32 tbl_offset;
@@ -440,7 +573,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int ret;
u8 bir;
- reg = ep->msix_cap + PCI_MSIX_TABLE;
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
@@ -505,7 +644,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
u32 reg;
int i;
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
dev_err(pci->dev,
"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
@@ -513,23 +653,21 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+ dw_pcie_dbi_ro_wr_en(pci);
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
if (offset) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
- dw_pcie_dbi_ro_wr_en(pci);
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
}
dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -539,57 +677,66 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
void *addr;
+ u8 func_no;
+ struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
+ struct dw_pcie_ep_func *ep_func;
- if (!pci->dbi_base || !pci->dbi_base2) {
- dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
- return -EINVAL;
- }
+ INIT_LIST_HEAD(&ep->func_list);
- ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ib-windows* property\n");
- return ret;
- }
- if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "Invalid *num-ib-windows*\n");
- return -EINVAL;
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
}
- ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ob-windows* property\n");
- return ret;
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (!res)
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ else {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ }
}
- if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "Invalid *num-ob-windows*\n");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
return -EINVAL;
- }
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ib_windows),
+ BITS_TO_LONGS(pci->num_ib_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ob_windows),
+ BITS_TO_LONGS(pci->num_ob_windows),
sizeof(long),
GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
- addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
GFP_KERNEL);
if (!addr)
return -ENOMEM;
ep->outbound_addr = addr;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -599,13 +746,27 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
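Editorial aside: the designware-ep rework is the multi-function enabling piece — every config-space access is now offset by ep->ops->func_conf_select(), and the core builds a dw_pcie_ep_func (with per-function MSI/MSI-X capability offsets) for each of epc->max_functions. A sketch of the one new callback a multi-function controller driver might provide, assuming its per-function DBI spaces sit a fixed stride apart (the stride value is invented for illustration):

#include <linux/sizes.h>

#include "pcie-designware.h"

/* Illustrative stride between per-function register spaces. */
#define SKETCH_FUNC_DBI_STRIDE	SZ_64K

static unsigned int sketch_func_conf_select(struct dw_pcie_ep *ep, u8 func_no)
{
	/* Function 0 lives at offset 0; each further function is one stride up. */
	return func_no * SKETCH_FUNC_DBI_STRIDE;
}

static const struct dw_pcie_ep_ops sketch_ep_ops = {
	/* A real driver also fills .ep_init and .raise_irq. */
	.func_conf_select = sketch_func_conf_select,
};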
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9dafecba347f..8a84c005f32b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -20,30 +20,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
-
-static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->rd_own_conf)
- return pp->ops->rd_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
-static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->wr_own_conf)
- return pp->ops->wr_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops dw_child_pcie_ops;
static void dw_msi_ack_irq(struct irq_data *d)
{
@@ -82,13 +59,13 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &status);
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
if (!status)
continue;
@@ -148,6 +125,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -158,8 +136,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -167,6 +144,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -177,8 +155,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -186,13 +163,14 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -278,7 +256,7 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return 0;
}
-void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct pcie_port *pp)
{
if (pp->msi_irq) {
irq_set_chained_handler(pp->msi_irq, NULL);
@@ -288,34 +266,27 @@ void dw_pcie_free_msi(struct pcie_port *pp)
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
- if (pp->msi_page)
- __free_page(pp->msi_page);
+ if (pp->msi_data) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+
+ dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ }
}
-void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- u64 msi_target;
+ u64 msi_target = (u64)pp->msi_data;
- pp->msi_page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
- }
- msi_target = (u64)pp->msi_data;
/* Program the msi_data */
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- lower_32_bits(msi_target));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
- upper_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -324,69 +295,58 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
- u32 hdr_type;
int ret;
raw_spin_lock_init(&pci->pp.lock);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) >> 1;
- pp->cfg1_size = resource_size(cfg_res) >> 1;
+ pp->cfg0_size = resource_size(cfg_res);
pp->cfg0_base = cfg_res->start;
- pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
dev_err(dev, "Missing *config* reg space\n");
}
+ if (!pci->dbi_base) {
+ struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ }
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
+ pp->bridge = bridge;
+
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- pp->io_base = pci_pio_to_address(pp->io->start);
- break;
- case IORESOURCE_MEM:
- pp->mem = win->res;
- pp->mem->name = "MEM";
- pp->mem_size = resource_size(pp->mem);
- pp->mem_bus_addr = pp->mem->start - win->offset;
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
break;
case 0:
- pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) >> 1;
- pp->cfg1_size = resource_size(pp->cfg) >> 1;
- pp->cfg0_base = pp->cfg->start;
- pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
- break;
- case IORESOURCE_BUS:
- pp->busn = win->res;
+ dev_err(dev, "Missing *config* reg space\n");
+ pp->cfg0_size = resource_size(win->res);
+ pp->cfg0_base = win->res->start;
+ if (!pci->dbi_base) {
+ pci->dbi_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg0_base,
+ pp->cfg0_size);
+ if (!pci->dbi_base) {
+ dev_err(dev, "Error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
break;
}
}
- if (!pci->dbi_base) {
- pci->dbi_base = devm_pci_remap_cfgspace(dev,
- pp->cfg->start,
- resource_size(pp->cfg));
- if (!pci->dbi_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
- }
-
- pp->mem_base = pp->mem->start;
-
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
@@ -396,101 +356,92 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
- pp->cfg1_base,
- pp->cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
- }
-
- ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
- if (ret)
- pci->num_viewport = 2;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
if (pci_msi_enabled()) {
- /*
- * If a specific SoC driver needs to change the
- * default number of vectors, it needs to implement
- * the set_num_vectors callback.
- */
- if (!pp->ops->set_num_vectors) {
+ pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ of_property_read_bool(np, "msi-parent") ||
+ of_property_read_bool(np, "msi-map"));
+
+ if (!pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
- } else {
- pp->ops->set_num_vectors(pp);
-
- if (pp->num_vectors > MAX_MSI_IRQS ||
- pp->num_vectors == 0) {
- dev_err(dev,
- "Invalid number of vectors\n");
- return -EINVAL;
- }
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ return -EINVAL;
}
- if (!pp->ops->msi_host_init) {
+ if (pp->ops->msi_host_init) {
+ ret = pp->ops->msi_host_init(pp);
+ if (ret < 0)
+ return ret;
+ } else if (pp->has_msi_ctrl) {
+ if (!pp->msi_irq) {
+ pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+ }
+ }
+
pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
ret = dw_pcie_allocate_domains(pp);
if (ret)
return ret;
- if (pp->msi_irq)
+ if (pp->msi_irq > 0)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
- } else {
- ret = pp->ops->msi_host_init(pp);
- if (ret < 0)
- return ret;
+
+ ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
+ pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
+ sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ pp->msi_data = 0;
+ goto err_free_msi;
+ }
}
}
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
+
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
goto err_free_msi;
}
- ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
- if (ret != PCIBIOS_SUCCESSFUL) {
- dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
- ret);
- ret = pcibios_err_to_errno(ret);
- goto err_free_msi;
- }
- if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- dev_err(pci->dev,
- "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
- hdr_type);
- ret = -EIO;
- goto err_free_msi;
- }
-
- bridge->sysdata = pp;
- bridge->ops = &dw_pcie_ops;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret)
- goto err_free_msi;
+ dw_pcie_setup_rc(pp);
+ dw_pcie_msi_init(pp);
- pp->root_bus = bridge->bus;
-
- if (pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
+ if (!dw_pcie_link_up(pci) && pci->ops->start_link) {
+ ret = pci->ops->start_link(pci);
+ if (ret)
+ goto err_free_msi;
+ }
- pci_bus_size_bridges(pp->root_bus);
- pci_bus_assign_resources(pp->root_bus);
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
- list_for_each_entry(child, &pp->root_bus->children, node)
- pcie_bus_configure_settings(child);
+ bridge->sysdata = pp;
- pci_bus_add_devices(pp->root_bus);
- return 0;
+ ret = pci_host_probe(bridge);
+ if (!ret)
+ return 0;
err_free_msi:
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
return ret;
}
@@ -498,129 +449,105 @@ EXPORT_SYMBOL_GPL(dw_pcie_host_init);
void dw_pcie_host_deinit(struct pcie_port *pp)
{
- pci_stop_root_bus(pp->root_bus);
- pci_remove_root_bus(pp->root_bus);
- if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
+ if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
-static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val,
- bool write)
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
+ int type;
+ u32 busdev;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- if (pci_is_root_bus(bus->parent)) {
+ if (pci_is_root_bus(bus->parent))
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- if (write)
- ret = dw_pcie_write(va_cfg_base + where, size, *val);
else
- ret = dw_pcie_read(va_cfg_base + where, size, val);
-
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
-
- return ret;
-}
+ type = PCIE_ATU_TYPE_CFG1;
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
-{
- if (pp->ops->rd_other_conf)
- return pp->ops->rd_other_conf(pp, bus, devfn, where,
- size, val);
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
- false);
-}
+ dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
-static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 val)
-{
- if (pp->ops->wr_other_conf)
- return pp->ops->wr_other_conf(pp, bus, devfn, where,
- size, val);
-
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
- true);
+ return pp->va_cfg0_base + where;
}
-static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
- int dev)
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
+ int ret;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- /* If there is no link, then there is no device */
- if (!pci_is_root_bus(bus)) {
- if (!dw_pcie_link_up(pci))
- return 0;
- } else if (dev > 0)
- /* Access only one slot on each root port */
- return 0;
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return 1;
+ return ret;
}
-static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
+ int ret;
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
- if (pci_is_root_bus(bus))
- return dw_pcie_rd_own_conf(pp, where, size, val);
+ if (!ret && pci->io_cfg_atu_shared)
+ dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+ return ret;
}
-static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
+
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (pci_is_root_bus(bus))
- return dw_pcie_wr_own_conf(pp, where, size, val);
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
- return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+ return pci->dbi_base + where;
}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
static struct pci_ops dw_pcie_ops = {
- .read = dw_pcie_rd_conf,
- .write = dw_pcie_wr_conf,
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
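With a .map_bus hook in place, the generic accessors perform the actual MMIO, so the driver no longer needs its own read/write wrappers or the old dw_pcie_valid_device() check: returning NULL from map_bus (link down, or slot > 0 on the root bus) makes the core fabricate all-ones data and PCIBIOS_DEVICE_NOT_FOUND on its own. pci_generic_config_read() in drivers/pci/access.c is roughly:

/* Rough sketch of the core helper */
int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}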
void dw_pcie_setup_rc(struct pcie_port *pp)
{
+ int i;
u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -632,18 +559,18 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- if (!pp->ops->msi_host_init) {
+ if (pp->has_msi_ctrl) {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ ~0);
}
}
@@ -670,29 +597,55 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+	/* Ensure all outbound windows are disabled so there are no multiple matches */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
+
/*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf) {
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
- if (pci->num_viewport > 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ int atu_idx = 0;
+ struct resource_entry *entry;
+
+ /* Get last memory resource entry */
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++atu_idx)
+ break;
+
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_MEM, entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++atu_idx)
+ dw_pcie_prog_outbound_atu(pci, atu_idx,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+ else
+ pci->io_cfg_atu_shared = true;
+ }
+
+ if (pci->num_ob_windows <= atu_idx)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
+ pci->num_ob_windows);
}
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
/* Program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
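Note the resulting window layout: outbound index 0 is reserved for CFG0/CFG1 accesses (and doubles as the IO window when io_cfg_atu_shared is set), while the bridge's MEM windows are programmed from index 1 upwards with the new u64-sized prototype. A hedged call sketch with purely illustrative addresses:

	/* Hypothetical MEM bridge window at index 1, mapped 1:1, 256 MiB */
	dw_pcie_prog_outbound_atu(pci, 1, PCIE_ATU_TYPE_MEM,
				  0x60000000,	/* cpu_addr */
				  0x60000000,	/* pci_addr */
				  SZ_256M);	/* size */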
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 712456f6ce36..9b397c807261 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -33,27 +33,7 @@ struct dw_plat_pcie_of_data {
static const struct of_device_id dw_plat_pcie_of_match[];
-static int dw_plat_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- dw_pcie_setup_rc(pp);
- dw_pcie_wait_for_link(pci);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
- return 0;
-}
-
-static void dw_plat_set_num_vectors(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
- .host_init = dw_plat_pcie_host_init,
- .set_num_vectors = dw_plat_set_num_vectors,
};
static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
@@ -124,12 +104,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
if (pp->irq < 0)
return pp->irq;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
+ pp->num_vectors = MAX_MSI_IRQS;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -141,43 +116,11 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return 0;
}
-static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = dw_plat_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "Failed to initialize endpoint\n");
- return ret;
- }
- return 0;
-}
-
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
- struct resource *res; /* Resource from DT */
int ret;
const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
@@ -204,14 +147,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pci->dbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, dw_plat_pcie);
switch (dw_plat_pcie->mode) {
@@ -227,9 +162,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
- ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
+ pci->ep.ops = &pcie_ep_ops;
+ return dw_pcie_ep_init(&pci->ep);
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index b723e0cc41fb..645fa1892375 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/types.h>
#include "../../pci.h"
@@ -166,21 +167,6 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
-{
- int ret;
- u32 val;
-
- if (pci->ops->read_dbi2)
- return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
-
- ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
- if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
-
- return val;
-}
-
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
@@ -195,31 +181,31 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
int ret;
u32 val;
if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
int ret;
if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, size, val);
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
@@ -239,9 +225,10 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr,
- u64 pci_addr, u32 size)
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int type,
+ u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
u32 retries, val;
u64 limit_addr = cpu_addr + size - 1;
@@ -258,8 +245,10 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
+ val = type | PCIE_ATU_FUNC_NUM(func_no);
+ val = upper_32_bits(size - 1) ?
+ val | PCIE_ATU_INCREASE_REGION_SIZE : val;
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
@@ -278,8 +267,9 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
@@ -287,8 +277,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
- pci_addr, size);
+ dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
return;
}
@@ -304,7 +294,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
lower_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
/*
@@ -321,6 +312,21 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u32 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
+}
+
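Host-side callers keep the old prototype through dw_pcie_prog_outbound_atu() (function 0), while endpoint code can now pass the physical function number so the window carries PCIE_ATU_FUNC_NUM(). A hedged usage sketch with made-up addresses:

	/* Illustrative only: map 64 KiB of outbound space for function 1 */
	dw_pcie_prog_ep_outbound_atu(pci, 1 /* func_no */, 2 /* index */,
				     PCIE_ATU_TYPE_MEM,
				     0x90000000,	/* cpu_addr */
				     0x10000000,	/* pci_addr */
				     SZ_64K);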
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
@@ -336,8 +342,8 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
- int bar, u64 cpu_addr,
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int bar, u64 cpu_addr,
enum dw_pcie_as_type as_type)
{
int type;
@@ -359,8 +365,10 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EINVAL;
}
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
PCIE_ATU_ENABLE |
PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
@@ -381,14 +389,15 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EBUSY;
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type)
{
int type;
u32 retries, val;
if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+ return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
cpu_addr, as_type);
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
@@ -407,9 +416,11 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
return -EINVAL;
}
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
- | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
@@ -444,7 +455,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
}
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
@@ -488,50 +499,41 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
- u32 reg, val;
+ u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
- reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
- reg &= ~PCI_EXP_LNKCTL2_TLS;
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
switch (pcie_link_speed[link_gen]) {
case PCIE_SPEED_2_5GT:
- reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
case PCIE_SPEED_5_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
break;
case PCIE_SPEED_8_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
break;
case PCIE_SPEED_16_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
break;
default:
/* Use hardware capability */
- val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
- val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
- reg &= ~PCI_EXP_LNKCTL2_HASD;
- reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
break;
}
- dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
-void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
-{
- u32 val;
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~PORT_LOGIC_N_FTS_MASK;
- val |= n_fts & PORT_LOGIC_N_FTS_MASK;
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
-EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
@@ -544,34 +546,139 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
return 0;
}
+static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+{
+ int max_region, i, ob = 0, ib = 0;
+ u32 val;
+
+ max_region = min((int)pci->atu_size / 512, 256);
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
+ 0x11110000);
+
+ val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+{
+ int max_region, i, ob = 0, ib = 0;
+ u32 val;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ob++;
+ else
+ break;
+ }
+
+ for (i = 0; i < max_region; i++) {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
+ if (val == 0x11110000)
+ ib++;
+ else
+ break;
+ }
+
+ pci->num_ib_windows = ib;
+ pci->num_ob_windows = ob;
+}
+
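The arithmetic behind the unroll-mode detection above, assuming the usual unrolled iATU layout (one 512-byte slot per region index, outbound registers in the low half, inbound in the high half):

	/*
	 * max_region = min(atu_size / 512, 256);
	 *
	 * so the SZ_4K fallback used further down gives 4096 / 512 = 8
	 * slots, i.e. "enough for 8 in and 8 out windows".
	 */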
void dw_pcie_setup(struct dw_pcie *pci)
{
- int ret;
u32 val;
- u32 lanes;
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
dw_pcie_iatu_unroll_enabled(pci))) {
pci->iatu_unroll_enabled = true;
- if (!pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
- }
- dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+ if (!pci->atu_base) {
+ struct resource *res =
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res)
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->atu_base))
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+
+ if (!pci->atu_size)
+ /* Pick a minimal default, enough for 8 in and 8 out windows */
+ pci->atu_size = SZ_4K;
+
+ dw_pcie_iatu_detect_regions_unroll(pci);
+ } else
+ dw_pcie_iatu_detect_regions(pci);
+
+ dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
+ dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
+ pci->num_ob_windows, pci->num_ib_windows);
- ret = of_property_read_u32(np, "num-lanes", &lanes);
- if (ret) {
- dev_dbg(pci->dev, "property num-lanes isn't found\n");
+ if (pci->link_gen > 0)
+ dw_pcie_link_set_max_speed(pci, pci->link_gen);
+
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[pci->link_gen - 1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+ if (!pci->num_lanes) {
+ dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
}
/* Set the number of lanes */
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
val &= ~PORT_LINK_MODE_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LINK_MODE_1_LANES;
break;
@@ -585,7 +692,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_MODE_8_LANES;
break;
default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
return;
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
@@ -593,7 +700,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
/* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
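With dw_pcie_link_set_max_speed() made static and dw_pcie_link_set_n_fts() removed, platform drivers feed the generic dw_pcie fields instead and let dw_pcie_setup() (called from dw_pcie_setup_rc()) apply them; the pcie-intel-gw.c and pcie-qcom.c hunks further down drop their private handling accordingly, which suggests the common code now derives link_gen when the driver leaves it unset. A hedged sketch with placeholder values:

	/* Placeholder values, filled in before the common setup runs */
	pci->link_gen = 3;	/* cap link training at Gen3 */
	pci->n_fts[0] = 0xdc;	/* Gen1 N_FTS */
	pci->n_fts[1] = 0xff;	/* Gen2+ N_FTS */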
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index f911760dcc69..0207840756c4 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -32,10 +32,18 @@
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
@@ -72,17 +80,17 @@
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND BIT(31)
#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_REGION_INDEX2 0x2
-#define PCIE_ATU_REGION_INDEX1 0x1
-#define PCIE_ATU_REGION_INDEX0 0x0
#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
@@ -95,6 +103,9 @@
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@@ -160,46 +171,29 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
- int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
- int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
- int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*scan_bus)(struct pcie_port *pp);
- void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
};
struct pcie_port {
+ bool has_msi_ctrl:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u32 cfg1_size;
resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
- u64 mem_base;
- phys_addr_t mem_bus_addr;
- u32 mem_size;
- struct resource *cfg;
- struct resource *io;
- struct resource *mem;
- struct resource *busn;
int irq;
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
+ u16 msi_msg;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
- struct pci_bus *root_bus;
+ struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -215,10 +209,26 @@ struct dw_pcie_ep_ops {
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+ /*
+	 * Provide a method to implement the different func config space
+	 * access for different platforms. If each function has its own
+	 * config space offset, return that function's offset; if the
+	 * function is instead selected by writing a register, return 0 and
+	 * implement the access in the platform driver's callback.
+ */
+ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+};
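A hypothetical func_conf_select implementation for a platform whose functions sit at a fixed stride in the DBI space (the stride is an assumption for illustration, not something the core mandates):

static unsigned int foo_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
						 u8 func_no)
{
	/* Hypothetical layout: each function's config space 4 KiB apart */
	return func_no * SZ_4K;
}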
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ep {
struct pci_epc *epc;
+ struct list_head func_list;
const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
@@ -227,12 +237,8 @@ struct dw_pcie_ep {
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
- u32 num_ib_windows;
- u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
- u8 msi_cap; /* MSI capability offset */
- u8 msix_cap; /* MSI-X capability offset */
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -242,8 +248,6 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
- size_t size);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
@@ -257,12 +261,18 @@ struct dw_pcie {
void __iomem *dbi_base2;
/* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
- u32 num_viewport;
- u8 iatu_unroll_enabled;
+ size_t atu_size;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
struct pcie_port pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
unsigned int version;
+ int num_lanes;
+ int link_gen;
+ u8 n_fts[2];
+ bool iatu_unroll_enabled: 1;
+ bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -278,20 +288,19 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
-void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen);
-void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
- u32 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type);
+ u64 size);
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type);
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
@@ -331,21 +340,6 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
-static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_dbi2(pci, reg, 0x4);
-}
-
-static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
-{
- dw_pcie_write_atu(pci, reg, 0x4, val);
-}
-
-static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_atu(pci, reg, 0x4);
-}
-
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -370,26 +364,18 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_msi_init(struct pcie_port *pp);
-void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_host_deinit(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_msi_init(struct pcie_port *pp)
-{
-}
-
-static inline void dw_pcie_free_msi(struct pcie_port *pp)
-{
-}
-
static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
@@ -407,6 +393,12 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_PCIE_DW_EP
@@ -420,7 +412,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
@@ -461,8 +457,21 @@ static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
#endif
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 5ca86796d43a..8fc5960faf28 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -100,7 +100,6 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
}
const struct pci_ecam_ops hisi_pcie_ops = {
- .bus_shift = 20,
.init = hisi_pcie_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -135,7 +134,6 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
}
static const struct pci_ecam_ops hisi_pcie_platform_ops = {
- .bus_shift = 20,
.init = hisi_pcie_platform_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 2a2835746077..86f9d16c50d7 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -122,32 +122,37 @@ static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
histb_pcie_dbi_w_mode(&pci->pp, false);
}
-static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
- int size, u32 *val)
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_r_mode(pp, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- histb_pcie_dbi_r_mode(pp, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_w_mode(pp, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- histb_pcie_dbi_w_mode(pp, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
static int histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -164,47 +169,37 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int histb_pcie_establish_link(struct pcie_port *pp)
+static int histb_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link already up\n");
- return 0;
- }
-
- /* PCIe RC work mode */
- regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
- regval &= ~PCIE_DEVICE_TYPE_MASK;
- regval |= PCIE_WM_RC;
- histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
-
- /* setup root complex */
- dw_pcie_setup_rc(pp);
-
/* assert LTSSM enable */
regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
regval |= PCIE_APP_LTSSM_ENABLE;
histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static int histb_pcie_host_init(struct pcie_port *pp)
{
- histb_pcie_establish_link(pp);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ pp->bridge->ops = &histb_pci_ops;
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
return 0;
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .rd_own_conf = histb_pcie_rd_own_conf,
- .wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,
};
@@ -297,6 +292,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = histb_pcie_read_dbi,
.write_dbi = histb_pcie_write_dbi,
.link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
};
static int histb_pcie_probe(struct platform_device *pdev)
@@ -397,12 +393,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
return PTR_ERR(hipcie->bus_reset);
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
hipcie->phy = devm_phy_get(dev, "phy");
if (IS_ERR(hipcie->phy)) {
dev_info(dev, "no pcie-phy found\n");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index c3b3a1d162b5..0cedd1f95f37 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -58,8 +58,6 @@
struct intel_pcie_soc {
unsigned int pcie_ver;
- unsigned int pcie_atu_offset;
- u32 num_viewport;
};
struct intel_pcie_port {
@@ -67,14 +65,9 @@ struct intel_pcie_port {
void __iomem *app_base;
struct gpio_desc *reset_gpio;
u32 rst_intrvl;
- u32 max_speed;
- u32 link_gen;
- u32 max_width;
- u32 n_fts;
struct clk *core_clk;
struct reset_control *core_rst;
struct phy *phy;
- u8 pcie_cap_ofst;
};
static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
@@ -134,11 +127,7 @@ static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
u32 val;
- u8 offset = lpp->pcie_cap_ofst;
-
- val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
- lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
- lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);
+ u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
@@ -146,41 +135,20 @@ static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}
-static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- u32 val, mask;
-
- switch (pcie_link_speed[lpp->max_speed]) {
- case PCIE_SPEED_8_0GT:
- lpp->n_fts = PORT_AFR_N_FTS_GEN3;
+ switch (pci->link_gen) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
- case PCIE_SPEED_16_0GT:
- lpp->n_fts = PORT_AFR_N_FTS_GEN4;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
break;
default:
- lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
break;
}
-
- mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
- val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
- FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
- pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);
-
- /* Port Link Control Register */
- pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
- PORT_LINK_DLL_LINK_EN);
-}
-
-static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
-{
- intel_pcie_ltssm_disable(lpp);
- intel_pcie_link_setup(lpp);
- dw_pcie_setup_rc(&lpp->pci.pp);
- dw_pcie_upconfig_setup(&lpp->pci);
- intel_pcie_port_logic_setup(lpp);
- dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen);
- dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}
static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
@@ -234,14 +202,6 @@ static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
gpiod_set_value_cansleep(lpp->reset_gpio, 0);
}
-static int intel_pcie_app_logic_setup(struct intel_pcie_port *lpp)
-{
- intel_pcie_device_rst_deassert(lpp);
- intel_pcie_ltssm_enable(lpp);
-
- return dw_pcie_wait_for_link(&lpp->pci);
-}
-
static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
{
pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
@@ -255,10 +215,6 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
struct device *dev = pci->dev;
int ret;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
lpp->core_clk = devm_clk_get(dev, NULL);
if (IS_ERR(lpp->core_clk)) {
ret = PTR_ERR(lpp->core_clk);
@@ -275,20 +231,11 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return ret;
}
- ret = device_property_match_string(dev, "device_type", "pci");
- if (ret) {
- dev_err(dev, "Failed to find pci device type: %d\n", ret);
- return ret;
- }
-
ret = device_property_read_u32(dev, "reset-assert-ms",
&lpp->rst_intrvl);
if (ret)
lpp->rst_intrvl = RESET_INTERVAL_MS;
- ret = of_pci_get_max_link_speed(dev->of_node);
- lpp->link_gen = ret < 0 ? 0 : ret;
-
lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(lpp->app_base))
return PTR_ERR(lpp->app_base);
@@ -304,17 +251,13 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return 0;
}
-static void intel_pcie_deinit_phy(struct intel_pcie_port *lpp)
-{
- phy_exit(lpp->phy);
-}
-
static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
u32 value;
int ret;
+ struct dw_pcie *pci = &lpp->pci;
- if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT)
+ if (pci->link_gen < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -343,8 +286,8 @@ static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
- struct device *dev = lpp->pci.dev;
int ret;
+ struct dw_pcie *pci = &lpp->pci;
intel_pcie_core_rst_assert(lpp);
intel_pcie_device_rst_assert(lpp);
@@ -361,19 +304,18 @@ static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
goto clk_err;
}
- if (!lpp->pcie_cap_ofst) {
- ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
- if (!ret) {
- ret = -ENXIO;
- dev_err(dev, "Invalid PCIe capability offset\n");
- goto app_init_err;
- }
+ pci->atu_base = pci->dbi_base + 0xC0000;
- lpp->pcie_cap_ofst = ret;
- }
+ intel_pcie_ltssm_disable(lpp);
+ intel_pcie_link_setup(lpp);
+ intel_pcie_init_n_fts(pci);
+ dw_pcie_setup_rc(&pci->pp);
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(lpp);
+ intel_pcie_ltssm_enable(lpp);
- intel_pcie_rc_setup(lpp);
- ret = intel_pcie_app_logic_setup(lpp);
+ ret = dw_pcie_wait_for_link(pci);
if (ret)
goto app_init_err;
@@ -387,7 +329,7 @@ app_init_err:
clk_disable_unprepare(lpp->core_clk);
clk_err:
intel_pcie_core_rst_assert(lpp);
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
return ret;
}
@@ -398,7 +340,7 @@ static void __intel_pcie_remove(struct intel_pcie_port *lpp)
intel_pcie_turn_off(lpp);
clk_disable_unprepare(lpp->core_clk);
intel_pcie_core_rst_assert(lpp);
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
}
static int intel_pcie_remove(struct platform_device *pdev)
@@ -422,7 +364,7 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
if (ret)
return ret;
- intel_pcie_deinit_phy(lpp);
+ phy_exit(lpp->phy);
clk_disable_unprepare(lpp->core_clk);
return ret;
}
@@ -442,14 +384,6 @@ static int intel_pcie_rc_init(struct pcie_port *pp)
return intel_pcie_host_setup(lpp);
}
-/*
- * Dummy function so that DW core doesn't configure MSI
- */
-static int intel_pcie_msi_init(struct pcie_port *pp)
-{
- return 0;
-}
-
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
return cpu_addr + BUS_IATU_OFFSET;
@@ -461,13 +395,10 @@ static const struct dw_pcie_ops intel_pcie_ops = {
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
.host_init = intel_pcie_rc_init,
- .msi_host_init = intel_pcie_msi_init,
};
static const struct intel_pcie_soc pcie_data = {
.pcie_ver = 0x520A,
- .pcie_atu_offset = 0xC0000,
- .num_viewport = 3,
};
static int intel_pcie_probe(struct platform_device *pdev)
@@ -502,7 +433,6 @@ static int intel_pcie_probe(struct platform_device *pdev)
pci->ops = &intel_pcie_ops;
pci->version = data->pcie_ver;
- pci->atu_base = pci->dbi_base + data->pcie_atu_offset;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
@@ -511,12 +441,6 @@ static int intel_pcie_probe(struct platform_device *pdev)
return ret;
}
- /*
- * Intel PCIe doesn't configure IO region, so set viewport
- * to not perform IO region access.
- */
- pci->num_viewport = data->num_viewport;
-
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index e496f51e0152..026fd1e42a55 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -157,11 +157,6 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
if (IS_ERR(kirin_pcie->phy_base))
return PTR_ERR(kirin_pcie->phy_base);
- kirin_pcie->pci->dbi_base =
- devm_platform_ioremap_resource_byname(pdev, "dbi");
- if (IS_ERR(kirin_pcie->pci->dbi_base))
- return PTR_ERR(kirin_pcie->pci->dbi_base);
-
kirin_pcie->crgctrl =
syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
if (IS_ERR(kirin_pcie->crgctrl))
@@ -330,34 +325,37 @@ static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
}
-static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+};
+
static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size)
{
@@ -392,41 +390,20 @@ static int kirin_pcie_link_up(struct dw_pcie *pci)
return 0;
}
-static int kirin_pcie_establish_link(struct pcie_port *pp)
+static int kirin_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- struct device *dev = kirin_pcie->pci->dev;
- int count = 0;
-
- if (kirin_pcie_link_up(pci))
- return 0;
-
- dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
PCIE_APP_LTSSM_ENABLE);
- /* check if the link is up or not */
- while (!kirin_pcie_link_up(pci)) {
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- count++;
- if (count == 1000) {
- dev_err(dev, "Link Fail\n");
- return -EINVAL;
- }
- }
-
return 0;
}
static int kirin_pcie_host_init(struct pcie_port *pp)
{
- kirin_pcie_establish_link(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ pp->bridge->ops = &kirin_pci_ops;
return 0;
}
@@ -435,44 +412,13 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
.link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .rd_own_conf = kirin_pcie_rd_own_conf,
- .wr_own_conf = kirin_pcie_wr_own_conf,
.host_init = kirin_pcie_host_init,
};
-static int kirin_pcie_add_msi(struct dw_pcie *pci,
- struct platform_device *pdev)
-{
- int irq;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- pci->pp.msi_irq = irq;
- }
-
- return 0;
-}
-
-static int kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
-{
- int ret;
-
- ret = kirin_pcie_add_msi(pci, pdev);
- if (ret)
- return ret;
-
- pci->pp.ops = &kirin_pcie_host_ops;
-
- return dw_pcie_host_init(&pci->pp);
-}
-
static int kirin_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -495,6 +441,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
ret = kirin_pcie_get_clk(kirin_pcie, pdev);
@@ -507,8 +454,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
"reset-gpios", 0);
- if (kirin_pcie->gpio_id_reset < 0)
+ if (kirin_pcie->gpio_id_reset == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(kirin_pcie->gpio_id_reset)) {
+ dev_err(dev, "unable to get a valid gpio pin\n");
return -ENODEV;
+ }
ret = kirin_pcie_power_on(kirin_pcie);
if (ret)
@@ -516,7 +467,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, kirin_pcie);
- return kirin_add_pcie_port(pci, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
static const struct of_device_id kirin_pcie_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 3aac77a295ba..affa2713bf80 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -57,6 +58,7 @@
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_PARF_DEVICE_TYPE 0x1000
+#define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -67,10 +69,6 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE20_CAP 0x70
-#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2)
-#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP)
-#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -101,6 +99,9 @@
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
+
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
+
struct qcom_pcie_resources_2_1_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
struct reset_control *pci_reset;
@@ -183,6 +184,7 @@ struct qcom_pcie_ops {
void (*deinit)(struct qcom_pcie *pcie);
void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
};
struct qcom_pcie {
@@ -193,7 +195,6 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
const struct qcom_pcie_ops *ops;
- int gen;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -212,18 +213,15 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static int qcom_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = pcie->pci;
-
- if (dw_pcie_link_up(pci))
- return 0;
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
/* Enable Link Training state machine */
if (pcie->ops->ltssm_enable)
pcie->ops->ltssm_enable(pcie);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
@@ -302,6 +300,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->por_reset);
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -314,6 +315,16 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
u32 val;
int ret;
+	/* Reset the PCIe interface as U-Boot can leave it in an undefined state */
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -394,12 +405,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
/* wait for clock acquisition */
usleep_range(1000, 1500);
- if (pcie->gen == 1) {
- val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
- val |= PCI_EXP_LNKSTA_CLS_2_5GB;
- writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
- }
-
/* Set the Max TLP size to 2K, instead of using default of 4K */
writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
@@ -1017,6 +1022,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
u32 val;
@@ -1092,14 +1098,14 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
- val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_ASPMS;
- writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
- writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base +
- PCIE20_DEVICE_CONTROL2_STATUS2);
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
return 0;
@@ -1252,11 +1258,83 @@ static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
- u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
+static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
+
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
+
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node,
+ "iommu-map", (u32 *)map, size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+	/* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ u16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
+ 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* If the register is already populated, look for the next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
+ }
+
+ kfree(map);
+
+ return 0;
+}
+
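Each PARF BDF-to-SID slot written above packs a whole mapping into a single 32-bit register, indexed by the CRC8 hash of the big-endian BDF and chained through the NEXT byte on collisions. A minimal sketch of the entry encoding, using a hypothetical helper name (the field layout is the one documented in the comment above):

/* Hypothetical helper, mirroring the layout used above:
 * BDF [31:16] | SID [15:8] | NEXT [7:0]; sid is relative to the SMMU
 * SID base taken from the first iommu-map entry.
 */
static u32 qcom_pcie_sid_entry(u16 bdf, u8 sid, u8 next_hash)
{
	return (u32)bdf << 16 | (u32)sid << 8 | next_hash;
}

The final write in the loop is then equivalent to qcom_pcie_sid_entry(map[i].bdf, map[i].smmu_sid - smmu_sid_base, 0).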
static int qcom_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1279,18 +1357,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
goto err_disable_phy;
}
- dw_pcie_setup_rc(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
qcom_ep_reset_deassert(pcie);
- ret = qcom_pcie_establish_link(pcie);
- if (ret)
- goto err;
+ if (pcie->ops->config_sid) {
+ ret = pcie->ops->config_sid(pcie);
+ if (ret)
+ goto err;
+ }
return 0;
+
err:
qcom_ep_reset_assert(pcie);
if (pcie->ops->post_deinit)
@@ -1359,14 +1435,25 @@ static const struct qcom_pcie_ops ops_2_7_0 = {
.post_deinit = qcom_pcie_post_deinit_2_7_0,
};
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .post_deinit = qcom_pcie_post_deinit_2_7_0,
+ .config_sid = qcom_pcie_config_sid_sm8250,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
};
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
@@ -1399,23 +1486,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node);
- if (pcie->gen < 0)
- pcie->gen = 2;
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
goto err_pm_runtime_put;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto err_pm_runtime_put;
- }
-
pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
if (IS_ERR(pcie->elbi)) {
ret = PTR_ERR(pcie->elbi);
@@ -1434,14 +1510,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- ret = pp->msi_irq;
- goto err_pm_runtime_put;
- }
- }
-
ret = phy_init(pcie->phy);
if (ret) {
pm_runtime_disable(&pdev->dev);
@@ -1476,6 +1544,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &ops_1_9_0 },
{ }
};
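The per-driver link-speed handling removed above (pcie->gen plus the Gen1 write to PCIE20_LNK_CONTROL2_LINK_STATUS2) has no replacement in this hunk; the DT max-link-speed limit is presumably applied by the DesignWare core through the shared pci->link_gen field. A minimal sketch of that pattern, borrowed from the spear13xx conversion later in this patch:

/* Sketch, not part of this hunk: cap the link speed via the shared DWC
 * field instead of programming LNKCTL2 by hand (assumption: the core
 * consumes pci->link_gen during link setup). */
if (of_property_read_bool(np, "st,pcie-is-gen1"))
	pci->link_gen = 1;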
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 62846562da0b..1a9e353bef55 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -26,7 +26,6 @@ struct spear13xx_pcie {
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- bool is_gen1;
};
struct pcie_app_reg {
@@ -65,60 +64,12 @@ struct pcie_app_reg {
/* CR6 */
#define MSI_CTRL_INT (1 << 26)
-#define EXP_CAP_ID_OFFSET 0x70
-
#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
-static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
- u32 val;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(pci->dev, "link already up\n");
- return 0;
- }
-
- dw_pcie_setup_rc(pp);
-
- /*
- * this controller support only 128 bytes read size, however its
- * default value in capability register is 512 bytes. So force
- * it to 128 here.
- */
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
-
- dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
-
- /*
- * if is_gen1 is set then handle it, so that some buggy card
- * also works
- */
- if (spear13xx_pcie->is_gen1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, val);
- }
- }
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -126,7 +77,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
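With this conversion, .start_link only enables LTSSM and returns; the dw_pcie_wait_for_link() call above is dropped from the driver. A minimal sketch of the assumed calling sequence in the DWC host core (assumption; not part of this patch):

/* Sketch: after .host_init and dw_pcie_setup_rc(), the core is assumed to
 * start the link and wait for it, so glue drivers only flip their
 * LTSSM-enable bits in .start_link. */
if (!dw_pcie_link_up(pci) && pci->ops->start_link)
	pci->ops->start_link(pci);	/* e.g. spear13xx_pcie_start_link() */

/* Errors are ignored here; the link may still come up later. */
dw_pcie_wait_for_link(pci);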
@@ -151,16 +102,12 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- dw_pcie_msi_init(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
writel(readl(&app_reg->int_mask) |
MSI_CTRL_INT, &app_reg->int_mask);
- }
}
static int spear13xx_pcie_link_up(struct dw_pcie *pci)
@@ -178,8 +125,23 @@ static int spear13xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+ * This controller supports only a 128-byte read request size; however,
+ * the default value in the capability register is 512 bytes, so force
+ * it to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
- spear13xx_pcie_establish_link(spear13xx_pcie);
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
return 0;
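The hard-coded EXP_CAP_ID_OFFSET (0x70) removed above is now discovered at run time by walking the capability list, and the accesses go through the DBI helpers instead of raw dw_pcie_read/write. Illustration only; on this controller the walk is expected to land on the same offset:

/* Illustrative read-modify-write of the Device Control register via the
 * run-time capability offset (same pattern as the host_init hunk above). */
u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 devctl = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);

devctl &= ~PCI_EXP_DEVCTL_READRQ;	/* 0 in this field selects 128 bytes */
dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, devctl);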
@@ -210,6 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -222,6 +185,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
};
static int spear13xx_pcie_probe(struct platform_device *pdev)
@@ -230,7 +194,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct spear13xx_pcie *spear13xx_pcie;
struct device_node *np = dev->of_node;
- struct resource *dbi_base;
int ret;
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
@@ -269,16 +232,8 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
return ret;
}
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto fail_clk;
- }
- spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
-
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- spear13xx_pcie->is_gen1 = true;
+ pci->link_gen = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 70498689d0c0..6fa216e52d14 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -183,19 +183,7 @@
#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
#define EVENT_COUNTER_GROUP_5 0x5
-#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C
-#define ENTER_ASPM BIT(30)
-#define L0S_ENTRANCE_LAT_SHIFT 24
-#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
-#define L1_ENTRANCE_LAT_SHIFT 27
-#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
-#define N_FTS_SHIFT 8
-#define N_FTS_MASK GENMASK(7, 0)
#define N_FTS_VAL 52
-
-#define PORT_LOGIC_GEN2_CTRL 0x80C
-#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17)
-#define FTS_MASK GENMASK(7, 0)
#define FTS_VAL 52
#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
@@ -296,7 +284,6 @@ struct tegra_pcie_dw {
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
- u32 max_speed;
u32 cid;
u32 cfg_link_cap_l1sub;
u32 pcie_cap_base;
@@ -401,9 +388,9 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
}
@@ -568,42 +555,44 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
/*
* This is an endpoint-mode-specific register that happens to appear even
* when the controller is operating in root port mode, and the system
* hangs when it is accessed with the link in the ASPM L1 state,
* so skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
- return dw_pcie_read(pci->dbi_base + where, size, val);
+ return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
/*
* This is an endpoint-mode-specific register that happens to appear even
* when the controller is operating in root port mode, and the system
* hangs when it is accessed with the link in the ASPM L1 state,
* so skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
- return dw_pcie_write(pci->dbi_base + where, size, val);
+ return pci_generic_config_write(bus, devfn, where, size, val);
}
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
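tegra_pci_ops replaces the old rd_own_conf/wr_own_conf host hooks with a standard struct pci_ops: dw_pcie_own_conf_map_bus() resolves the DBI address and the driver's read/write callbacks only filter the endpoint-only doorbell register. A hypothetical usage illustration (assumes the root bus from pp->bridge->bus, as used later in this patch):

/* Illustration only: a root-port config read of the EP-only MSI-X
 * doorbell returns 0 without touching DBI, avoiding the ASPM-L1 hang
 * described in the comment above. */
u32 val;

tegra_pcie_dw_rd_own_conf(pp->bridge->bus, PCI_DEVFN(0, 0),
			  PORT_LOGIC_MSIX_DOORBELL, 4, &val);
/* val == 0x00000000, PCIBIOS_SUCCESSFUL returned */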
#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
@@ -692,30 +681,23 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
/* Program L0s and L1 entrance latencies */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~L0S_ENTRANCE_LAT_MASK;
- val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
- val |= ENTER_ASPM;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static int init_debugfs(struct tegra_pcie_dw *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- struct dentry *d;
-
- d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
- pcie->debugfs, aspm_state_cnt);
- if (IS_ERR_OR_NULL(d))
- dev_err(pcie->dev,
- "Failed to create debugfs file \"aspm_state_cnt\"\n");
-
- return 0;
+ debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
-static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
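init_debugfs() now returns void and no longer checks for errors: debugfs creation calls are designed so callers need not inspect their return values. A minimal sketch mirroring the two call sites touched by this patch (identifiers taken from the surrounding hunks):

/* Create the directory and the devm-managed seq_file without error
 * handling; a failed debugfs node is simply absent at run time. */
pcie->debugfs = debugfs_create_dir(name, NULL);
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
			    aspm_state_cnt);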
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
@@ -783,8 +765,6 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- dw_pcie_msi_init(pp);
-
/* Enable MSI interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
@@ -827,26 +807,24 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
/* Program init preset */
for (i = 0; i < pcie->num_lanes; i++) {
- dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, &val);
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
- dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, val);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
offset = dw_pcie_find_ext_capability(pci,
PCI_EXT_CAP_ID_PL_16GT) +
PCI_PL_16GT_LE_CTRL;
- dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
+ val = dw_pcie_readb_dbi(pci, offset + i);
val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
- dw_pcie_write(pci->dbi_base + offset + i, 1, val);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
}
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -875,12 +853,18 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static void tegra_pcie_prepare_host(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -892,17 +876,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Configure FTS */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~(N_FTS_MASK << N_FTS_SHIFT);
- val |= N_FTS_VAL << N_FTS_SHIFT;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
-
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val &= ~FTS_MASK;
- val |= FTS_VAL;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
-
/* Enable as 0xFFFF0001 response for CRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
@@ -910,16 +883,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
AMBA_ERROR_RESPONSE_CRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
- /* Configure Max Speed from DT */
- if (pcie->max_speed && pcie->max_speed != -EINVAL) {
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_SLS;
- val |= pcie->max_speed;
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
- val);
- }
-
/* Configure Max lane width from DT */
val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_MLW;
@@ -930,6 +893,12 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
init_host_aspm(pcie);
+ /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
+ if (!pcie->supports_clkreq) {
+ disable_aspm_l11(pcie);
+ disable_aspm_l12(pcie);
+ }
+
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
@@ -940,10 +909,24 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
}
- dw_pcie_setup_rc(pp);
-
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ u32 val, offset, speed, tmp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct pcie_port *pp = &pci->pp;
+ bool retry = true;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
/* Assert RST */
val = appl_readl(pcie, APPL_PINMUX);
val &= ~APPL_PINMUX_PEX_RST;
@@ -962,17 +945,10 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
appl_writel(pcie, val, APPL_PINMUX);
msleep(100);
-}
-
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
- u32 val, tmp, offset, speed;
-
- tegra_pcie_prepare_host(pp);
if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
/*
* There are some endpoints which can't get the link up if
* root port has Data Link Feature (DLF) enabled.
@@ -1006,10 +982,11 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_prepare_host(pp);
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
- if (dw_pcie_wait_for_link(pci))
- return 0;
+ retry = false;
+ goto retry_link;
}
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
@@ -1029,20 +1006,6 @@ static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
-static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
-{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
-
- enable_irq(pcie->pex_rst_irq);
-
- return 0;
-}
-
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
@@ -1057,10 +1020,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
};
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .rd_own_conf = tegra_pcie_dw_rd_own_conf,
- .wr_own_conf = tegra_pcie_dw_wr_own_conf,
.host_init = tegra_pcie_dw_host_init,
- .set_num_vectors = tegra_pcie_set_msi_vec_num,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1102,9 +1062,16 @@ phy_exit:
static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
int ret;
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
if (ret < 0) {
dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
@@ -1129,8 +1096,6 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return ret;
}
- pcie->max_speed = of_pci_get_max_link_speed(np);
-
ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
if (ret) {
dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
@@ -1262,9 +1227,9 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
* 5.2 Link State Power Management (Page #428).
*/
- list_for_each_entry(child, &pp->root_bus->children, node) {
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
/* Bring downstream devices to D0 if they are not already in */
- if (child->parent == pp->root_bus) {
+ if (child->parent == pp->bridge->bus) {
root_bus = child;
break;
}
@@ -1433,15 +1398,6 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
reset_control_deassert(pcie->core_rst);
- pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
- PCI_CAP_ID_EXP);
-
- /* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
return ret;
fail_phy:
@@ -1458,43 +1414,32 @@ fail_slot_reg_en:
return ret;
}
-static int __deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
int ret;
ret = reset_control_assert(pcie->core_rst);
- if (ret) {
- dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
tegra_pcie_disable_phy(pcie);
ret = reset_control_assert(pcie->core_apb_rst);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
- return ret;
- }
clk_disable_unprepare(pcie->core_clk);
ret = regulator_disable(pcie->pex_ctl_supply);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
- return ret;
- }
tegra_pcie_disable_slot_regulators(pcie);
ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
- if (ret) {
+ if (ret)
dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
pcie->cid, ret);
- return ret;
- }
-
- return ret;
}
static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
@@ -1518,7 +1463,8 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
@@ -1559,6 +1505,14 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
data &= ~APPL_PINMUX_PEX_RST;
appl_writel(pcie, data, APPL_PINMUX);
+ /*
+ * Some cards do not go to the detect state even after de-asserting
+ * PERST#, so disable LTSSM to bring the link to the detect state.
+ */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
data,
((data &
@@ -1566,14 +1520,8 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
APPL_DEBUG_LTSSM_STATE_SHIFT) ==
LTSSM_STATE_PRE_DETECT,
1, LTSSM_TIMEOUT);
- if (err) {
+ if (err)
dev_info(pcie->dev, "Link didn't go to detect state\n");
- } else {
- /* Disable LTSSM after link is in detect state */
- data = appl_readl(pcie, APPL_CTRL);
- data &= ~APPL_CTRL_LTSSM_EN;
- appl_writel(pcie, data, APPL_CTRL);
- }
}
/*
* DBI registers may not be accessible after this as PLL-E would be
@@ -1587,30 +1535,20 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
tegra_pcie_dw_pme_turnoff(pcie);
-
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
struct device *dev = pcie->dev;
char *name;
int ret;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
- if (!pp->msi_irq) {
- dev_err(dev, "Failed to get MSI interrupt\n");
- return -ENODEV;
- }
- }
-
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
@@ -1626,7 +1564,11 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_pm_get_sync;
}
- tegra_pcie_init_controller(pcie);
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
if (!pcie->link_state) {
@@ -1641,10 +1583,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
}
pcie->debugfs = debugfs_create_dir(name, NULL);
- if (!pcie->debugfs)
- dev_err(dev, "Failed to create debugfs\n");
- else
- init_debugfs(pcie);
+ init_debugfs(pcie);
return ret;
@@ -1817,27 +1756,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
- /* Configure N_FTS & FTS */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~(N_FTS_MASK << N_FTS_SHIFT);
- val |= N_FTS_VAL << N_FTS_SHIFT;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
-
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val &= ~FTS_MASK;
- val |= FTS_VAL;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
-
- /* Configure Max Speed from DT */
- if (pcie->max_speed && pcie->max_speed != -EINVAL) {
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_SLS;
- val |= pcie->max_speed;
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
- val);
- }
-
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
@@ -1974,19 +1892,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
struct dw_pcie *pci = &pcie->pci;
struct device *dev = pcie->dev;
struct dw_pcie_ep *ep;
- struct resource *res;
char *name;
int ret;
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
ep->page_size = SZ_64K;
ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
@@ -2049,7 +1960,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra_pcie_dw *pcie;
- struct resource *dbi_res;
struct pcie_port *pp;
struct dw_pcie *pci;
struct phy **phys;
@@ -2066,7 +1976,12 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
+ pci->n_fts[0] = N_FTS_VAL;
+ pci->n_fts[1] = FTS_VAL;
+ pci->version = 0x490A;
+
pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
pcie->dev = &pdev->dev;
pcie->mode = (enum dw_pcie_device_mode)data->mode;
@@ -2155,20 +2070,6 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pcie->phys = phys;
- dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!dbi_res) {
- dev_err(dev, "Failed to find \"dbi\" region\n");
- return -ENODEV;
- }
- pcie->dbi_res = dbi_res;
-
- pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /* Tegra HW locates DBI2 at a fixed offset from DBI */
- pci->dbi_base2 = pci->dbi_base + 0x1000;
-
atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"atu_dma");
if (!atu_dma_res) {
@@ -2177,6 +2078,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pcie->atu_dma_res = atu_dma_res;
+ pci->atu_size = resource_size(atu_dma_res);
pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
@@ -2289,8 +2191,9 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
- return __deinit_controller(pcie);
+ return 0;
}
static int tegra_pcie_dw_resume_noirq(struct device *dev)
@@ -2311,6 +2214,10 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
goto fail_host_init;
}
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
/* Restore MSI interrupt vector */
dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
pcie->msi_ctrl_int);
@@ -2318,7 +2225,8 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
return 0;
fail_host_init:
- return __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
}
static int tegra_pcie_dw_resume_early(struct device *dev)
@@ -2356,7 +2264,7 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
disable_irq(pcie->pci.pp.msi_irq);
tegra_pcie_dw_pme_turnoff(pcie);
- __deinit_controller(pcie);
+ tegra_pcie_unconfig_controller(pcie);
}
static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 148355960061..69810c6b0d58 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -218,35 +218,6 @@ static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
.get_features = uniphier_pcie_get_features,
};
-static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &priv->pci;
- struct dw_pcie_ep *ep = &pci->ep;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int ret;
-
- ep->ops = &uniphier_pcie_ep_ops;
-
- pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret)
- dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);
-
- return ret;
-}
-
static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
{
int ret;
@@ -300,7 +271,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_ep_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -314,11 +284,6 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
-
priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -352,7 +317,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- return uniphier_add_pcie_ep(priv, pdev);
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ return dw_pcie_ep_init(&priv->pci.ep);
}
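After this change the endpoint glue no longer maps "dbi"/"dbi2" or parses "addr_space" itself; those are expected to be picked up by the DWC endpoint core from the named platform resources. A minimal probe sketch of the resulting pattern, using hypothetical foo_* names:

/* Hypothetical DWC endpoint glue after this series (foo_* names are
 * placeholders for illustration only). */
struct foo_pcie_ep {
	struct dw_pcie pci;
};

static int foo_pcie_ep_probe(struct platform_device *pdev)
{
	struct foo_pcie_ep *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pci.dev = &pdev->dev;
	priv->pci.ops = &dw_pcie_ops;		/* .start_link / .stop_link */
	priv->pci.ep.ops = &foo_pcie_ep_ops;	/* hypothetical ep ops */

	/* "dbi", "dbi2" and "addr_space" are assumed to be resolved by
	 * dw_pcie_ep_init() from the named platform resources. */
	return dw_pcie_ep_init(&priv->pci.ep);
}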
static const struct pci_epc_features uniphier_pro5_data = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 3a7f403b57b8..7e8bad326770 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -146,16 +146,13 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
return (val & mask) == mask;
}
-static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- if (dw_pcie_link_up(pci))
- return 0;
-
uniphier_pcie_ltssm_enable(priv, true);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
@@ -317,14 +314,6 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)
uniphier_pcie_irq_enable(priv);
- dw_pcie_setup_rc(pp);
- ret = uniphier_pcie_establish_link(pci);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
return 0;
}
@@ -332,31 +321,6 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
-static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &priv->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- pp->ops = &uniphier_pcie_host_ops;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "Failed to initialize host (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
{
int ret;
@@ -392,7 +356,7 @@ out_clk_disable:
}
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = uniphier_pcie_establish_link,
+ .start_link = uniphier_pcie_start_link,
.stop_link = uniphier_pcie_stop_link,
.link_up = uniphier_pcie_link_up,
};
@@ -401,7 +365,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_pcie_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -411,11 +374,6 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
priv->pci.dev = dev;
priv->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
-
priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -438,7 +396,9 @@ static int uniphier_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- return uniphier_add_pcie_port(priv, pdev);
+ priv->pci.pp.ops = &uniphier_pcie_host_ops;
+
+ return dw_pcie_host_init(&priv->pci.pp);
}
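The host-side conversion follows the same shape: the "dbi" mapping, the "msi" interrupt, dw_pcie_setup_rc() and dw_pcie_msi_init() all move out of the glue driver, which is left with .host_init plus a .start_link that merely enables LTSSM. A minimal probe sketch under that assumption, using hypothetical foo_* names:

/* Hypothetical DWC host glue after this series (foo_* names are
 * placeholders for illustration only). */
struct foo_pcie {
	struct dw_pcie pci;
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct foo_pcie *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pci.dev = &pdev->dev;
	priv->pci.ops = &dw_pcie_ops;		/* .link_up / .start_link */
	priv->pci.pp.ops = &foo_pcie_host_ops;	/* hypothetical, .host_init only */

	/* The core is assumed to ioremap the "dbi" resource and request the
	 * "msi" interrupt itself, as implied by the removals above. */
	return dw_pcie_host_init(&priv->pci.pp);
}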
static const struct of_device_id uniphier_pcie_match[] = {