Diffstat (limited to 'drivers/pci/controller')
 drivers/pci/controller/Kconfig                    |   4
 drivers/pci/controller/dwc/Kconfig                |   2
 drivers/pci/controller/dwc/pci-dra7xx.c           |   1
 drivers/pci/controller/dwc/pcie-armada8k.c        |  84
 drivers/pci/controller/dwc/pcie-designware-host.c |  12
 drivers/pci/controller/dwc/pcie-designware.c      |  61
 drivers/pci/controller/dwc/pcie-designware.h      |  39
 drivers/pci/controller/dwc/pcie-kirin.c           |   2
 drivers/pci/controller/dwc/pcie-qcom.c            | 115
 drivers/pci/controller/pci-aardvark.c             |   2
 drivers/pci/controller/pci-hyperv.c               |  15
 drivers/pci/controller/pci-tegra.c                | 589
 drivers/pci/controller/pcie-altera-msi.c          |  10
 drivers/pci/controller/pcie-altera.c              |  69
 drivers/pci/controller/pcie-iproc-platform.c      |   2
 drivers/pci/controller/pcie-iproc.c               |   2
 drivers/pci/controller/pcie-mobiveil.c            | 525
 drivers/pci/controller/pcie-xilinx-nwl.c          |  11
 drivers/pci/controller/vmd.c                      |   2
 19 files changed, 1119 insertions(+), 428 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 011c57cae4b0..fe9f9f13ce11 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -174,14 +174,14 @@ config PCIE_IPROC_MSI
PCIe controller
config PCIE_ALTERA
- bool "Altera PCIe controller"
+ tristate "Altera PCIe controller"
depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
help
Say Y here if you want to enable PCIe controller support on Altera
FPGA.
config PCIE_ALTERA_MSI
- bool "Altera PCIe MSI feature"
+ tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
depends on PCI_MSI_IRQ_DOMAIN
help
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index a6ce1ee51b4c..6ea778ae4877 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -90,7 +90,7 @@ config PCI_EXYNOS
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
- depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 419451efd58c..4234ddb4722f 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include "../../pci.h"
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 0c389a30ef5d..3d55dc78d999 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -25,10 +25,14 @@
#include "pcie-designware.h"
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
+ struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+ unsigned int phy_count;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -55,7 +59,7 @@ struct armada8k_pcie {
#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
- * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
*/
#define ARCACHE_DEFAULT_VALUE 0x3511
@@ -67,6 +71,76 @@ struct armada8k_pcie {
#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+ pcie->phy_count);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(pcie->phy[i]) &&
+ (PTR_ERR(pcie->phy[i]) == -EPROBE_DEFER))
+ return PTR_ERR(pcie->phy[i]);
+
+ if (IS_ERR(pcie->phy[i])) {
+ pcie->phy[i] = NULL;
+ continue;
+ }
+
+ pcie->phy_count++;
+ }
+
+ /* Old bindings miss the PHY handle, so just warn if there is no PHY */
+ if (!pcie->phy_count)
+ dev_warn(dev, "No available PHY\n");
+
+ ret = armada8k_pcie_enable_phys(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+ return ret;
+}
+
static int armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
@@ -249,14 +323,20 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail_clkreg;
}
+ ret = armada8k_pcie_setup_phys(pcie);
+ if (ret)
+ goto fail_clkreg;
+
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail_clkreg;
+ goto disable_phy;
return 0;
+disable_phy:
+ armada8k_pcie_disable_phys(pcie);
fail_clkreg:
clk_disable_unprepare(pcie->clk_reg);
fail:
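
For reference, the per-lane bring-up added above follows the generic kernel PHY API pattern: phy_init(), then phy_set_mode_ext() with the lane count as submode, then phy_power_on(), unwinding with phy_exit() on failure. A minimal sketch of that pattern for a single lane (the function name is illustrative, not from this patch):

	#include <linux/phy/phy.h>

	/* Sketch: bring up one PCIe PHY lane; caller unwinds earlier lanes. */
	static int example_phy_bringup(struct phy *phy, unsigned int nr_lanes)
	{
		int ret;

		ret = phy_init(phy);	/* initialize the PHY hardware */
		if (ret)
			return ret;

		/* tell the PHY driver how many lanes the PCIe link uses */
		ret = phy_set_mode_ext(phy, PHY_MODE_PCIE, nr_lanes);
		if (ret)
			goto err_exit;

		ret = phy_power_on(phy);	/* power up the lane */
		if (ret)
			goto err_exit;

		return 0;

	err_exit:
		phy_exit(phy);
		return ret;
	}
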
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 77db32529319..f93252d0da5b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -311,6 +311,7 @@ void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -495,6 +496,16 @@ err_free_msi:
dw_pcie_free_msi(pp);
return ret;
}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
+
+void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+ pci_stop_root_bus(pp->root_bus);
+ pci_remove_root_bus(pp->root_bus);
+ if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ dw_pcie_free_msi(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val,
@@ -687,3 +698,4 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
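
These exports and the new dw_pcie_host_deinit() are what allow a DesignWare-based host driver to be built and unloaded as a module. A hedged sketch of how a hypothetical modular driver might pair them (the example_* names are illustrative, not part of this patch):

	static int example_dwc_probe(struct platform_device *pdev)
	{
		struct pcie_port *pp = example_port_setup(pdev); /* hypothetical */

		platform_set_drvdata(pdev, pp);
		return dw_pcie_host_init(pp);	/* MSI setup + bus enumeration */
	}

	static int example_dwc_remove(struct platform_device *pdev)
	{
		struct pcie_port *pp = platform_get_drvdata(pdev);

		dw_pcie_host_deinit(pp);	/* tear down root bus, free MSI */
		return 0;
	}
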
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 9d7c51c32b3b..7d25102c304c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -34,6 +34,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
@@ -51,69 +52,97 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
int ret;
u32 val;
if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, base, reg, size);
+ return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
- ret = dw_pcie_read(base + reg, size, &val);
+ ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
if (ret)
dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, base, reg, size, val);
+ pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
- ret = dw_pcie_write(base + reg, size, val);
+ ret = dw_pcie_write(pci->dbi_base + reg, size, val);
if (ret)
dev_err(pci->dev, "Write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
{
int ret;
u32 val;
if (pci->ops->read_dbi2)
- return pci->ops->read_dbi2(pci, base, reg, size);
+ return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
- ret = dw_pcie_read(base + reg, size, &val);
+ ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
if (ret)
dev_err(pci->dev, "read DBI address failed\n");
return val;
}
-void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
if (pci->ops->write_dbi2) {
- pci->ops->write_dbi2(pci, base, reg, size, val);
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
return;
}
- ret = dw_pcie_write(base + reg, size, val);
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
if (ret)
dev_err(pci->dev, "write DBI address failed\n");
}
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+
+ ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "Read ATU address failed\n");
+
+ return val;
+}
+
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->atu_base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write ATU address failed\n");
+}
+
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
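
The net effect of the refactoring above is that callers no longer thread an I/O base through the accessors; each helper resolves its own base (dbi_base, dbi_base2 or atu_base) from struct dw_pcie. A before/after sketch with a placeholder register offset:

	u32 val;

	/* before: the base had to be passed explicitly */
	val = __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);

	/* after: the base is implied by which helper is called */
	val = dw_pcie_read_dbi(pci, reg, 0x4);
	dw_pcie_write_atu(pci, reg, 0x4, val);	/* uses pci->atu_base */
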
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index b8993f2b78df..ffed084a0b4f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -254,14 +254,12 @@ struct dw_pcie {
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val);
-u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size);
-void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val);
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
@@ -275,52 +273,52 @@ void dw_pcie_setup(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+ dw_pcie_write_dbi(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+ return dw_pcie_read_dbi(pci, reg, 0x4);
}
static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+ dw_pcie_write_dbi(pci, reg, 0x2, val);
}
static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+ return dw_pcie_read_dbi(pci, reg, 0x2);
}
static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+ dw_pcie_write_dbi(pci, reg, 0x1, val);
}
static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+ return dw_pcie_read_dbi(pci, reg, 0x1);
}
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val);
+ dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4);
+ return dw_pcie_read_dbi2(pci, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+ dw_pcie_write_atu(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+ return dw_pcie_read_atu(pci, reg, 0x4);
}
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
@@ -351,6 +349,7 @@ void dw_pcie_msi_init(struct pcie_port *pp);
void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
+void dw_pcie_host_deinit(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
@@ -375,6 +374,10 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
return 0;
}
+static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+}
+
static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 9b599296205d..8df1914226be 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -2,7 +2,7 @@
/*
* PCIe host controller driver for Kirin Phone SoCs
*
- * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
* http://www.huawei.com
*
* Author: Xiaowei Song <songxiaowei@huawei.com>
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 0ed235d560e3..7e581748ee9f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -112,10 +112,10 @@ struct qcom_pcie_resources_2_3_2 {
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
struct qcom_pcie_resources_2_4_0 {
- struct clk *aux_clk;
- struct clk *master_clk;
- struct clk *slave_clk;
+ struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ int num_clks;
struct reset_control *axi_m_reset;
struct reset_control *axi_s_reset;
struct reset_control *pipe_reset;
@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(100);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -638,18 +640,20 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+ int ret;
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
+ res->clks[0].id = "aux";
+ res->clks[1].id = "master_bus";
+ res->clks[2].id = "slave_bus";
+ res->clks[3].id = "iface";
- res->master_clk = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_clk))
- return PTR_ERR(res->master_clk);
+ /* qcom,pcie-ipq4019 is defined without "iface" */
+ res->num_clks = is_ipq ? 3 : 4;
- res->slave_clk = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_clk))
- return PTR_ERR(res->slave_clk);
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ if (ret < 0)
+ return ret;
res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
if (IS_ERR(res->axi_m_reset))
@@ -659,27 +663,33 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->axi_s_reset))
return PTR_ERR(res->axi_s_reset);
- res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(res->pipe_reset))
- return PTR_ERR(res->pipe_reset);
-
- res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_vmid");
- if (IS_ERR(res->axi_m_vmid_reset))
- return PTR_ERR(res->axi_m_vmid_reset);
-
- res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
- "axi_s_xpu");
- if (IS_ERR(res->axi_s_xpu_reset))
- return PTR_ERR(res->axi_s_xpu_reset);
-
- res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
- if (IS_ERR(res->parf_reset))
- return PTR_ERR(res->parf_reset);
-
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- if (IS_ERR(res->phy_reset))
- return PTR_ERR(res->phy_reset);
+ if (is_ipq) {
+ /*
+ * These resources relate to the PHY or are secure clocks, but
+ * are controlled here for IPQ4019
+ */
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+ }
res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
"axi_m_sticky");
@@ -699,9 +709,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->ahb_reset))
return PTR_ERR(res->ahb_reset);
- res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
- if (IS_ERR(res->phy_ahb_reset))
- return PTR_ERR(res->phy_ahb_reset);
+ if (is_ipq) {
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+ }
return 0;
}
@@ -719,9 +731,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
reset_control_assert(res->axi_m_sticky_reset);
reset_control_assert(res->pwr_reset);
reset_control_assert(res->ahb_reset);
- clk_disable_unprepare(res->aux_clk);
- clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->slave_clk);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
@@ -850,23 +860,9 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
usleep_range(10000, 12000);
- ret = clk_prepare_enable(res->aux_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_clk_aux;
- }
-
- ret = clk_prepare_enable(res->master_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_axi_m;
- }
-
- ret = clk_prepare_enable(res->slave_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_axi_s;
- }
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret)
+ goto err_clks;
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
@@ -891,11 +887,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
return 0;
-err_clk_axi_s:
- clk_disable_unprepare(res->master_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->aux_clk);
-err_clk_aux:
+err_clks:
reset_control_assert(res->ahb_reset);
err_rst_ahb:
reset_control_assert(res->pwr_reset);
@@ -1289,6 +1281,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
{ }
};
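
The clk_bulk conversion above is the standard pattern for drivers that manage several clocks together: fill in the .id fields, fetch them in one call, and enable or disable them as a group. A minimal sketch using the clock names from this patch (clk_bulk API from <linux/clk.h>):

	struct clk_bulk_data clks[] = {
		{ .id = "aux" },
		{ .id = "master_bus" },
		{ .id = "slave_bus" },
	};
	int ret;

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	/* enables all clocks, unwinding automatically on failure */
	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;
	/* ... */
	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
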
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 134e0306ff00..fc0fe4d4de49 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -308,7 +308,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
- /* Unmask all MSI's */
+ /* Unmask all MSIs */
advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 82acd6155adf..40b625458afa 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
static void hv_eject_device_work(struct work_struct *work)
{
struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
struct pci_dev *pdev;
unsigned long flags;
@@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work)
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
WARN_ON(hpdev->state != hv_pcichild_ejecting);
@@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work)
* because hbus->pci_bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
- wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work)
pci_unlock_rescan_remove();
}
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
@@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct work_struct *work)
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
@@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct work_struct *work)
/* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
- put_hvpcibus(hpdev->hbus);
+ /* hpdev has been freed. Do not use it any more. */
+
+ put_hvpcibus(hbus);
}
/**
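
The hv_eject_device_work() change above is a use-after-free fix: the final put_pcichild() can free hpdev, so the bus pointer is cached in a local up front and every later access goes through it. The general shape of the pattern, with hypothetical names:

	/* cache what we still need before dropping the last reference */
	struct example_bus *bus = child->bus;

	put_child(child);	/* may free child */
	/* child must not be dereferenced past this point */
	put_bus(bus);		/* safe: uses the cached pointer */
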
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 464ba2538d52..9a917b2456f6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -30,6 +31,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -95,7 +97,8 @@
#define AFI_MSI_EN_VEC7 0xa8
#define AFI_CONFIGURATION 0xac
-#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31)
#define AFI_FPCI_ERROR_MASKS 0xb0
@@ -159,13 +162,14 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29))
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
-#define AFI_PEX2_CTRL 0x128
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
@@ -177,20 +181,74 @@
#define AFI_PEXBIAS_CTRL_0 0x168
+#define RP_PRIV_XP_DL 0x00000494
+#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
+
+#define RP_RX_HDR_LIMIT 0x00000e00
+#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
+#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
+
+#define RP_ECTL_2_R1 0x00000e84
+#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R1 0x00000e8c
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R1 0x00000e90
+#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R1 0x00000e94
+#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
+#define RP_ECTL_2_R2 0x00000ea4
+#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R2 0x00000eac
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R2 0x00000eb0
+#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R2 0x00000eb4
+#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
#define RP_VEND_XP 0x00000f00
-#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
+#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
+#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18)
+
+#define RP_VEND_CTL0 0x00000f44
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12)
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
+
+#define RP_VEND_CTL1 0x00000f48
+#define RP_VEND_CTL1_ERPT (1 << 13)
+
+#define RP_VEND_XP_BIST 0x00000f4c
+#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
#define RP_VEND_CTL2 0x00000fa8
#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
#define RP_PRIV_MISC 0x00000fe0
-#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
-#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+#define RP_LINK_CONTROL_STATUS_2 0x000000b0
+
#define PADS_CTL_SEL 0x0000009c
#define PADS_CTL 0x000000a0
@@ -226,6 +284,7 @@
#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
#define PME_ACK_TIMEOUT 10000
+#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
struct tegra_msi {
struct msi_controller chip;
@@ -249,10 +308,12 @@ struct tegra_pcie_soc {
unsigned int num_ports;
const struct tegra_pcie_port_soc *ports;
unsigned int msi_base_shift;
+ unsigned long afi_pex2_ctrl;
u32 pads_pll_ctl;
u32 tx_ref_sel;
u32 pads_refclk_cfg0;
u32 pads_refclk_cfg1;
+ u32 update_fc_threshold;
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
@@ -260,6 +321,24 @@ struct tegra_pcie_soc {
bool has_gen2;
bool force_pca_enable;
bool program_uphy;
+ bool update_clamp_threshold;
+ bool program_deskew_time;
+ bool raw_violation_fixup;
+ bool update_fc_timer;
+ bool has_cache_bars;
+ struct {
+ struct {
+ u32 rp_ectl_2_r1;
+ u32 rp_ectl_4_r1;
+ u32 rp_ectl_5_r1;
+ u32 rp_ectl_6_r1;
+ u32 rp_ectl_2_r2;
+ u32 rp_ectl_4_r2;
+ u32 rp_ectl_5_r2;
+ u32 rp_ectl_6_r2;
+ } regs;
+ bool enable;
+ } ectl;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -321,6 +400,8 @@ struct tegra_pcie_port {
unsigned int lanes;
struct phy **phys;
+
+ struct gpio_desc *reset_gpio;
};
struct tegra_pcie_bus {
@@ -440,6 +521,7 @@ static struct pci_ops tegra_pcie_ops = {
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long ret = 0;
switch (port->index) {
@@ -452,7 +534,7 @@ static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
break;
case 2:
- ret = AFI_PEX2_CTRL;
+ ret = soc->afi_pex2_ctrl;
break;
}
@@ -465,15 +547,162 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
unsigned long value;
/* pulse reset signal */
- value = afi_readl(port->pcie, ctrl);
- value &= ~AFI_PEX_CTRL_RST;
- afi_writel(port->pcie, value, ctrl);
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 1);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
usleep_range(1000, 2000);
- value = afi_readl(port->pcie, ctrl);
- value |= AFI_PEX_CTRL_RST;
- afi_writel(port->pcie, value, ctrl);
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 0);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
+}
+
+static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /* Enable AER capability */
+ value = readl(port->base + RP_VEND_CTL1);
+ value |= RP_VEND_CTL1_ERPT;
+ writel(value, port->base + RP_VEND_CTL1);
+
+ /* Optimal settings to enhance bandwidth */
+ value = readl(port->base + RP_VEND_XP);
+ value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
+ value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
+ writel(value, port->base + RP_VEND_XP);
+
+ /*
+ * LTSSM will wait for DLLP to finish before entering L1 or L2,
+ * to avoid truncation of PM messages which results in receiver errors
+ */
+ value = readl(port->base + RP_VEND_XP_BIST);
+ value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
+ writel(value, port->base + RP_VEND_XP_BIST);
+
+ value = readl(port->base + RP_PRIV_MISC);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
+ value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
+
+ if (soc->update_clamp_threshold) {
+ value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
+ }
+
+ writel(value, port->base + RP_PRIV_MISC);
+}
+
+static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ value = readl(port->base + RP_ECTL_2_R1);
+ value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r1;
+ writel(value, port->base + RP_ECTL_2_R1);
+
+ value = readl(port->base + RP_ECTL_4_R1);
+ value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r1 <<
+ RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R1);
+
+ value = readl(port->base + RP_ECTL_5_R1);
+ value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r1;
+ writel(value, port->base + RP_ECTL_5_R1);
+
+ value = readl(port->base + RP_ECTL_6_R1);
+ value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r1;
+ writel(value, port->base + RP_ECTL_6_R1);
+
+ value = readl(port->base + RP_ECTL_2_R2);
+ value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r2;
+ writel(value, port->base + RP_ECTL_2_R2);
+
+ value = readl(port->base + RP_ECTL_4_R2);
+ value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r2 <<
+ RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R2);
+
+ value = readl(port->base + RP_ECTL_5_R2);
+ value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r2;
+ writel(value, port->base + RP_ECTL_5_R2);
+
+ value = readl(port->base + RP_ECTL_6_R2);
+ value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r2;
+ writel(value, port->base + RP_ECTL_6_R2);
+}
+
+static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /*
+ * Sometimes link speed change from Gen2 to Gen1 fails due to
+ * instability in deskew logic on lane-0. Increase the deskew
+ * retry time to resolve this issue.
+ */
+ if (soc->program_deskew_time) {
+ value = readl(port->base + RP_VEND_CTL0);
+ value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
+ value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
+ writel(value, port->base + RP_VEND_CTL0);
+ }
+
+ /* Fixup for read after write violation. */
+ if (soc->raw_violation_fixup) {
+ value = readl(port->base + RP_RX_HDR_LIMIT);
+ value &= ~RP_RX_HDR_LIMIT_PW_MASK;
+ value |= RP_RX_HDR_LIMIT_PW;
+ writel(value, port->base + RP_RX_HDR_LIMIT);
+
+ value = readl(port->base + RP_PRIV_XP_DL);
+ value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
+ writel(value, port->base + RP_PRIV_XP_DL);
+
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+ value |= soc->update_fc_threshold;
+ writel(value, port->base + RP_VEND_XP);
+ }
+
+ if (soc->update_fc_timer) {
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+ value |= soc->update_fc_threshold;
+ writel(value, port->base + RP_VEND_XP);
+ }
+
+ /*
+ * The PCIe link doesn't come up with a few legacy PCIe endpoints if
+ * the root port advertises both Gen-1 and Gen-2 speeds in Tegra.
+ * Hence, the strategy followed here is to initially advertise
+ * only Gen-1 and, after the link is up, retrain it to Gen-2 speed
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_2_5GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
@@ -500,6 +729,13 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
value |= RP_VEND_CTL2_PCA_ENABLE;
writel(value, port->base + RP_VEND_CTL2);
}
+
+ tegra_pcie_enable_rp_features(port);
+
+ if (soc->ectl.enable)
+ tegra_pcie_program_ectl_settings(port);
+
+ tegra_pcie_apply_sw_fixup(port);
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -521,6 +757,12 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
value &= ~AFI_PEX_CTRL_REFCLK_EN;
afi_writel(port->pcie, value, ctrl);
+
+ /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
+ value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
@@ -545,12 +787,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
-/* Tegra PCIE requires relaxed ordering */
+/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
@@ -635,7 +880,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* do not pollute kernel log with master abort reports since they
* happen a lot during enumeration
*/
- if (code == AFI_INTR_MASTER_ABORT)
+ if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
@@ -704,11 +949,13 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
afi_writel(pcie, 0, AFI_FPCI_BAR5);
- /* map all upstream transactions as uncached */
- afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
- afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
- afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
- afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+ if (pcie->soc->has_cache_bars) {
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+ }
/* MSI translations are setup only when needed */
afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
@@ -852,7 +1099,6 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@@ -878,12 +1124,6 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
}
}
- /* Configure the reference clock driver */
- pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
-
- if (soc->num_ports > 2)
- pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
-
return 0;
}
@@ -918,13 +1158,11 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
return 0;
}
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
- struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
- int err;
/* enable PLL power down */
if (pcie->phy) {
@@ -942,9 +1180,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value = afi_readl(pcie, AFI_PCIE_CONFIG);
value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
- list_for_each_entry(port, &pcie->ports, list)
+ list_for_each_entry(port, &pcie->ports, list) {
value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ }
afi_writel(pcie, value, AFI_PCIE_CONFIG);
@@ -958,20 +1199,10 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- if (soc->program_uphy) {
- err = tegra_pcie_phy_power_on(pcie);
- if (err < 0) {
- dev_err(dev, "failed to power on PHY(s): %d\n", err);
- return err;
- }
- }
-
- /* take the PCIe interface module out of reset */
- reset_control_deassert(pcie->pcie_xrst);
-
- /* finally enable PCIe */
+ /* Disable AFI dynamic clock gating and enable PCIe */
value = afi_readl(pcie, AFI_CONFIGURATION);
value |= AFI_CONFIGURATION_EN_FPCI;
+ value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
afi_writel(pcie, value, AFI_CONFIGURATION);
value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
@@ -989,22 +1220,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
/* disable all exceptions */
afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
-
- return 0;
-}
-
-static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
-{
- int err;
-
- reset_control_assert(pcie->pcie_xrst);
-
- if (pcie->soc->program_uphy) {
- err = tegra_pcie_phy_power_off(pcie);
- if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
- err);
- }
}
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
@@ -1014,13 +1229,11 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
int err;
reset_control_assert(pcie->afi_rst);
- reset_control_assert(pcie->pex_rst);
clk_disable_unprepare(pcie->pll_e);
if (soc->has_cml_clk)
clk_disable_unprepare(pcie->cml_clk);
clk_disable_unprepare(pcie->afi_clk);
- clk_disable_unprepare(pcie->pex_clk);
if (!dev->pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -1048,46 +1261,66 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
if (err < 0)
dev_err(dev, "failed to enable regulators: %d\n", err);
- if (dev->pm_domain) {
- err = clk_prepare_enable(pcie->pex_clk);
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
if (err) {
- dev_err(dev, "failed to enable PEX clock: %d\n", err);
- return err;
+ dev_err(dev, "failed to power ungate: %d\n", err);
+ goto regulator_disable;
}
- reset_control_deassert(pcie->pex_rst);
- } else {
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk,
- pcie->pex_rst);
+ err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
if (err) {
- dev_err(dev, "powerup sequence failed: %d\n", err);
- return err;
+ dev_err(dev, "failed to remove clamp: %d\n", err);
+ goto powergate;
}
}
- reset_control_deassert(pcie->afi_rst);
-
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
dev_err(dev, "failed to enable AFI clock: %d\n", err);
- return err;
+ goto powergate;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
dev_err(dev, "failed to enable CML clock: %d\n", err);
- return err;
+ goto disable_afi_clk;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
dev_err(dev, "failed to enable PLLE clock: %d\n", err);
- return err;
+ goto disable_cml_clk;
}
+ reset_control_deassert(pcie->afi_rst);
+
return 0;
+
+disable_cml_clk:
+ if (soc->has_cml_clk)
+ clk_disable_unprepare(pcie->cml_clk);
+disable_afi_clk:
+ clk_disable_unprepare(pcie->afi_clk);
+powergate:
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+regulator_disable:
+ regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+
+ return err;
+}
+
+static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+
+ /* Configure the reference clock driver */
+ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
@@ -1647,6 +1880,15 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
return 0;
}
+static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
+{
+ u32 value;
+
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_INT_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+}
+
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
@@ -1990,6 +2232,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
+ char *label;
err = of_pci_get_devfn(port);
if (err < 0) {
@@ -2048,6 +2291,31 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
+ label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+ if (!label) {
+ dev_err(dev, "failed to create reset GPIO label\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * devm_gpiod_get_from_of_node() returns -ENOENT if the reset-gpios
+ * property is not populated; in that case fall back to using the
+ * AFI per-port register to toggle the PERST# SFIO line.
+ */
+ rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
+ "reset-gpios", 0,
+ GPIOD_OUT_LOW,
+ label);
+ if (IS_ERR(rp->reset_gpio)) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ rp->reset_gpio = NULL;
+ } else {
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
+ return PTR_ERR(rp->reset_gpio);
+ }
+ }
+
list_add_tail(&rp->list, &pcie->ports);
}
@@ -2095,7 +2363,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
- dev_err(dev, "link %u down, retrying\n", port->index);
+ dev_dbg(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@@ -2117,6 +2385,64 @@ retry:
return false;
}
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port;
+ ktime_t deadline;
+ u32 value;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ /*
+ * "Supported Link Speeds Vector" in "Link Capabilities 2"
+ * is not supported by Tegra. tegra_pcie_change_link_speed()
+ * is called only for Tegra chips that support Gen2.
+ * So there is no harm if the supported link speed is not verified.
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+ /*
+ * Poll until the link comes back from recovery to avoid a
+ * race condition.
+ */
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_warn(dev, "PCIe port %u link is in recovery\n",
+ port->index);
+
+ /* Retrain the link */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ value |= PCI_EXP_LNKCTL_RL;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS);
+
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_err(dev, "failed to retrain link of port %u\n",
+ port->index);
+ }
+}
+
static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -2127,7 +2453,12 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
port->index, port->lanes);
tegra_pcie_port_enable(port);
+ }
+ /* Start LTSSM from Tegra side */
+ reset_control_deassert(pcie->pcie_xrst);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
if (tegra_pcie_port_check_link(port))
continue;
@@ -2136,12 +2467,17 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
}
+
+ if (pcie->soc->has_gen2)
+ tegra_pcie_change_link_speed(pcie);
}
static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
{
struct tegra_pcie_port *port, *tmp;
+ reset_control_assert(pcie->pcie_xrst);
+
list_for_each_entry_safe(port, tmp, &pcie->ports, list)
tegra_pcie_port_disable(port);
}
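
The retrain sequence added above follows the standard PCIe flow: program the target link speed, set PCI_EXP_LNKCTL_RL (Retrain Link), then poll PCI_EXP_LNKSTA_LT (Link Training) until it clears. Since pci-tegra.c already includes <linux/iopoll.h>, the open-coded ktime loop could equivalently be expressed with readl_poll_timeout(); a hedged sketch of that alternative:

	u32 value;
	int err;

	/* poll Link Training until clear, sleeping ~2 ms between reads */
	err = readl_poll_timeout(port->base + RP_LINK_CONTROL_STATUS, value,
				 !(value & PCI_EXP_LNKSTA_LT), 2000,
				 LINK_RETRAIN_TIMEOUT);
	if (err)
		dev_err(dev, "failed to retrain link of port %u\n",
			port->index);
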
@@ -2155,6 +2491,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.ports = tegra20_pcie_ports,
.msi_base_shift = 0,
+ .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2165,6 +2502,12 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_gen2 = false,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = true,
+ .ectl.enable = false,
};
static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
@@ -2188,6 +2531,12 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_gen2 = false,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2197,6 +2546,8 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x44ac44ac,
+ /* FC threshold is bit[25:18] */
+ .update_fc_threshold = 0x03fc0000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2204,6 +2555,12 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_gen2 = true,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = false,
+ .raw_violation_fixup = true,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2213,6 +2570,8 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x90b890b8,
+ /* FC threshold is bit[25:18] */
+ .update_fc_threshold = 0x01800000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2220,6 +2579,24 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.has_gen2 = true,
.force_pca_enable = true,
.program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = true,
+ .raw_violation_fixup = false,
+ .update_fc_timer = true,
+ .has_cache_bars = false,
+ .ectl = {
+ .regs = {
+ .rp_ectl_2_r1 = 0x0000000f,
+ .rp_ectl_4_r1 = 0x00000067,
+ .rp_ectl_5_r1 = 0x55010000,
+ .rp_ectl_6_r1 = 0x00000001,
+ .rp_ectl_2_r2 = 0x0000008f,
+ .rp_ectl_4_r2 = 0x000000c7,
+ .rp_ectl_5_r2 = 0x55010000,
+ .rp_ectl_6_r2 = 0x00000001,
+ },
+ .enable = true,
+ },
};
static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
@@ -2232,6 +2609,7 @@ static const struct tegra_pcie_soc tegra186_pcie = {
.num_ports = 3,
.ports = tegra186_pcie_ports,
.msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x19c,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x80b880b8,
@@ -2243,6 +2621,12 @@ static const struct tegra_pcie_soc tegra186_pcie = {
.has_gen2 = true,
.force_pca_enable = false,
.program_uphy = false,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct of_device_id tegra_pcie_of_match[] = {
@@ -2485,16 +2869,32 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
struct tegra_pcie_port *port;
+ int err;
list_for_each_entry(port, &pcie->ports, list)
tegra_pcie_pme_turnoff(port);
tegra_pcie_disable_ports(pcie);
+ /*
+ * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
+ * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
+ */
+ tegra_pcie_disable_interrupts(pcie);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
+
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_disable_msi(pcie);
- tegra_pcie_disable_controller(pcie);
+ pinctrl_pm_select_idle_state(dev);
tegra_pcie_power_off(pcie);
return 0;
@@ -2510,20 +2910,45 @@ static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
dev_err(dev, "tegra pcie power on fail: %d\n", err);
return err;
}
- err = tegra_pcie_enable_controller(pcie);
- if (err) {
- dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
goto poweroff;
}
+
+ tegra_pcie_enable_controller(pcie);
tegra_pcie_setup_translations(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_enable_msi(pcie);
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ goto pex_dpd_enable;
+ }
+
+ reset_control_deassert(pcie->pex_rst);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ goto disable_pex_clk;
+ }
+ }
+
+ tegra_pcie_apply_pad_settings(pcie);
tegra_pcie_enable_ports(pcie);
return 0;
+disable_pex_clk:
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+pex_dpd_enable:
+ pinctrl_pm_select_idle_state(dev);
poweroff:
tegra_pcie_power_off(pcie);
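
The reset-GPIO lookup added to tegra_pcie_parse_dt() treats -ENOENT as "property absent, use the legacy AFI register path". For the common case of a GPIO on the device's own node (rather than a per-port child node, which is why this driver needs devm_gpiod_get_from_of_node()), the kernel offers a ready-made variant of the same idiom; a hedged sketch:

	/* NULL when the property is absent, ERR_PTR() on real errors */
	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	if (gpio)
		gpiod_set_value(gpio, 1);	/* assert reset via GPIO */
	else
		example_afi_assert_reset();	/* hypothetical legacy fallback */
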
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 025ef7d9a046..16d938920ca5 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -288,4 +289,13 @@ static int __init altera_msi_init(void)
{
return platform_driver_register(&altera_msi_driver);
}
+
+static void __exit altera_msi_exit(void)
+{
+ platform_driver_unregister(&altera_msi_driver);
+}
+
subsys_initcall(altera_msi_init);
+MODULE_DEVICE_TABLE(of, altera_msi_of_match);
+module_exit(altera_msi_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 27edcebd1726..d2497ca43828 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -43,6 +44,8 @@
#define S10_RP_RXCPL_STATUS 0x200C
#define S10_RP_CFG_ADDR(pcie, reg) \
(((pcie)->hip_base) + (reg) + (1 << 20))
+#define S10_RP_SECONDARY(pcie) \
+ readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
@@ -54,14 +57,9 @@
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
-#define TLP_CFGRD_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \
- : pcie->pcie_data->cfgrd1) << 24) | \
- TLP_PAYLOAD_SIZE)
-#define TLP_CFGWR_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgwr0 \
- : pcie->pcie_data->cfgwr1) << 24) | \
- TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW0(pcie, cfg) \
+ (((cfg) << 24) | \
+ TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
@@ -321,14 +319,31 @@ static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
s10_tlp_write_tx(pcie, data, RP_TX_EOP);
}
+static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, bool read, u32 *headers)
+{
+ u8 cfg;
+ u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
+ u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
+ u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;
+
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1)
+ cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
+ else
+ cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;
+
+ headers[0] = TLP_CFG_DW0(pcie, cfg);
+ headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+}
+
static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
int where, u8 byte_en, u32 *value)
{
u32 headers[TLP_HDR_SIZE];
- headers[0] = TLP_CFGRD_DW0(pcie, bus);
- headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
- headers[2] = TLP_CFG_DW2(bus, devfn, where);
+ get_tlp_header(pcie, bus, devfn, where, byte_en, true,
+ headers);
pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
@@ -341,9 +356,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
- headers[0] = TLP_CFGWR_DW0(pcie, bus);
- headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
- headers[2] = TLP_CFG_DW2(bus, devfn, where);
+ get_tlp_header(pcie, bus, devfn, where, byte_en, false,
+ headers);
/* check alignment to Qword */
if ((where & 0x7) == 0)
@@ -705,6 +719,13 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
return 0;
}
+static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_domain_remove(pcie->irq_domain);
+ irq_dispose_mapping(pcie->irq);
+}
+
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@@ -798,6 +819,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
match = of_match_device(altera_pcie_of_match, &pdev->dev);
if (!match)
@@ -855,13 +877,28 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
+static int altera_pcie_remove(struct platform_device *pdev)
+{
+ struct altera_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_free_resource_list(&pcie->resources);
+ altera_pcie_irq_teardown(pcie);
+
+ return 0;
+}
+
static struct platform_driver altera_pcie_driver = {
.probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
.name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
- .suppress_bind_attrs = true,
},
};
-builtin_platform_driver(altera_pcie_driver);
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+module_platform_driver(altera_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index f30f5f3fb5c1..5a3550b6bb29 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -87,7 +87,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
/*
* DT nodes are not used by all platforms that use the iProc PCIe
- * core driver. For platforms that require explict inbound mapping
+ * core driver. For platforms that require explicit inbound mapping
* configuration, "dma-ranges" would have been present in DT
*/
pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index e3ca46497470..2d457bfdaf66 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -163,7 +163,7 @@ enum iproc_pcie_ib_map_type {
* @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
* SZ_1G
* @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
- * GB, depedning on the size unit
+ * GB, depending on the size unit
* @nr_sizes: number of supported inbound mapping region sizes
* @nr_windows: number of supported inbound mapping windows for the region
* @imap_addr_offset: register offset between the upper and lower 32-bit
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index 77052a0712d0..672e633601c7 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -31,56 +31,61 @@
* translation tables are grouped into windows, each window registers are
* grouped into blocks of 4 or 16 registers each
*/
-#define PAB_REG_BLOCK_SIZE 16
-#define PAB_EXT_REG_BLOCK_SIZE 4
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
-#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
-#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+#define PAB_REG_ADDR(offset, win) \
+ (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-#define LTSSM_STATUS 0x0404
-#define LTSSM_STATUS_L0_MASK 0x3f
-#define LTSSM_STATUS_L0 0x2d
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
-#define PAB_CTRL 0x0808
-#define AMBA_PIO_ENABLE_SHIFT 0
-#define PEX_PIO_ENABLE_SHIFT 1
-#define PAGE_SEL_SHIFT 13
-#define PAGE_SEL_MASK 0x3f
-#define PAGE_LO_MASK 0x3ff
-#define PAGE_SEL_EN 0xc00
-#define PAGE_SEL_OFFSET_SHIFT 10
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
-#define PAB_AXI_PIO_CTRL 0x0840
-#define APIO_EN_MASK 0xf
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
-#define PAB_PEX_PIO_CTRL 0x08c0
-#define PIO_ENABLE_SHIFT 0
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
#define PAB_INTP_INTX_MASK 0x01e0
#define PAB_INTP_MSI_MASK 0x8
-#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-#define WIN_ENABLE_SHIFT 0
-#define WIN_TYPE_SHIFT 1
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
#define AXI_WINDOW_ALIGN_MASK 3
#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
#define PAB_INTP_AXI_PIO_CLASS 0x474
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
@@ -88,34 +93,40 @@
#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
+#define PAB_INTX_START 5
/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
+#define PCI_NUM_MSI 16
/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
@@ -145,15 +156,119 @@ struct mobiveil_pcie {
struct mobiveil_msi msi;
};
-static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
- const u32 reg)
+/*
+ * mobiveil_pcie_sel_page - select which page of paged registers is active
+ *
+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are paged.
+ * For this scheme to work, the upper 6 bits of the offset are written to the
+ * pg_sel field of the PAB_CTRL register, and the lower 10 bits, OR'd with
+ * PAGED_ADDR_BNDRY, are used as the offset within the selected page.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void __iomem *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
{
- writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
}
-static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
{
- return readl_relaxed(pcie->csr_axi_slave_base + reg);
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void __iomem *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "CSR read failed\n");
+
+ return val;
+}
+
+static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+{
+ void __iomem *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "CSR write failed\n");
+}
+
+static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return csr_read(pcie, off, 0x4);
+}
+
+static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+ csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
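A standalone sketch of the page arithmetic that OFFSET_TO_PAGE_IDX() and
OFFSET_TO_PAGE_ADDR() implement above, using PAB_PEX_AMAP_CTRL(0) = 0x4ba0 as
the example offset:

    #include <stdio.h>

    #define PAGE_SEL_MASK         0x3f
    #define PAGE_LO_MASK          0x3ff
    #define PAGE_SEL_OFFSET_SHIFT 10
    #define PAGED_ADDR_BNDRY      0xc00

    int main(void)
    {
        unsigned int off = 0x4ba0; /* PAB_PEX_AMAP_CTRL(0), a paged register */
        unsigned int idx = (off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
        unsigned int in_page = (off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY;

        /* prints "page 0x12, in-page address 0xfa0" */
        printf("page %#x, in-page address %#x\n", idx, in_page);
        return 0;
    }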
@@ -174,7 +289,7 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
* Do not read more than one device on the bus directly
* attached to RC
*/
- if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
return false;
return true;
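The switch from devfn to PCI_SLOT(devfn) matters for multi-function devices on
the root bus; a quick model (PCI_SLOT copied from the kernel's pci.h):

    #include <stdio.h>

    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)

    int main(void)
    {
        /* devfn 0x01 is device 0, function 1: the old "devfn > 0" test
         * rejected it, while PCI_SLOT(devfn) > 0 only filters out higher
         * device numbers and lets the extra functions be enumerated. */
        unsigned int devfn = 0x01;

        printf("devfn > 0: %d, PCI_SLOT(devfn) > 0: %d\n",
               devfn > 0, PCI_SLOT(devfn) > 0);
        return 0;
    }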
@@ -185,17 +300,17 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
* root port or endpoint
*/
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
+ unsigned int devfn, int where)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ u32 value;
if (!mobiveil_pcie_valid_device(bus, devfn))
return NULL;
- if (bus->number == pcie->root_bus_nr) {
- /* RC config access */
+ /* RC config access */
+ if (bus->number == pcie->root_bus_nr)
return pcie->csr_axi_slave_base + where;
- }
/*
* EP config access (in Config/APIO space)
@@ -203,10 +318,12 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
* (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
* Relies on pci_lock serialization
*/
- csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
- PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
- PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
- PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ value = bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
+
+ csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+
return pcie->config_axi_slave_base + where;
}
@@ -241,24 +358,29 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
- PAB_INTX_START;
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
virq = irq_find_mapping(pcie->intx_domain,
- bit + 1);
+ bit + 1);
if (virq)
generic_handle_irq(virq);
else
- dev_err_ratelimited(dev,
- "unexpected IRQ, INT%d\n", bit);
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
+ bit);
- /* clear interrupt */
- csr_writel(pcie,
- shifted_status << PAB_INTX_START,
- PAB_INTP_AMBA_MISC_STAT);
+ /* clear only the interrupt we just handled */
+ csr_writel(pcie, 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- } while ((shifted_status >> PAB_INTX_START) != 0);
+
+ shifted_status = csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
+ } while (shifted_status != 0);
}
/* read extra MSI status register */
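The per-bit acknowledge introduced here is the usual write-one-to-clear idiom;
a standalone model of why acking only the handled bit is safer than writing the
whole shifted status back (the pending set is made up):

    #include <stdio.h>

    #define PAB_INTX_START 5

    int main(void)
    {
        /* INTA (bit 5) and INTC (bit 7) both pending */
        unsigned int stat = (1u << (PAB_INTX_START + 0)) |
                            (1u << (PAB_INTX_START + 2));

        /* W1C ack of INTA alone: INTC stays pending and is picked up by
         * the re-read at the bottom of the loop instead of being lost. */
        stat &= ~(1u << (PAB_INTX_START + 0));
        printf("status after ack: %#x\n", stat); /* 0x80: INTC still set */
        return 0;
    }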
@@ -266,8 +388,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
/* handle MSI interrupts */
while (msi_status & 1) {
- msi_data = readl_relaxed(pcie->apb_csr_base
- + MSI_DATA_OFFSET);
+ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
/*
* MSI_STATUS_OFFSET register gets updated to zero
@@ -276,18 +397,18 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
* two dummy reads.
*/
msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
- MSI_ADDR_L_OFFSET);
+ MSI_ADDR_L_OFFSET);
msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
- MSI_ADDR_H_OFFSET);
+ MSI_ADDR_H_OFFSET);
dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
- msi_data, msi_addr_hi, msi_addr_lo);
+ msi_data, msi_addr_hi, msi_addr_lo);
virq = irq_find_mapping(msi->dev_domain, msi_data);
if (virq)
generic_handle_irq(virq);
msi_status = readl_relaxed(pcie->apb_csr_base +
- MSI_STATUS_OFFSET);
+ MSI_STATUS_OFFSET);
}
/* Clear the interrupt status */
@@ -304,7 +425,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "config_axi_slave");
+ "config_axi_slave");
pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->config_axi_slave_base))
return PTR_ERR(pcie->config_axi_slave_base);
@@ -312,7 +433,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
/* map csr resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "csr_axi_slave");
+ "csr_axi_slave");
pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->csr_axi_slave_base))
return PTR_ERR(pcie->csr_axi_slave_base);
@@ -337,92 +458,50 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
return -ENODEV;
}
- irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
-
return 0;
}
-/*
- * select_paged_register - routine to access paged register of root complex
- *
- * registers of RC are paged, for this scheme to work
- * extracted higher 6 bits of the offset will be written to pg_sel
- * field of PAB_CTRL register and rest of the lower 10 bits enabled with
- * PAGE_SEL_EN are used as offset of the register.
- */
-static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
-{
- int pab_ctrl_dw, pg_sel;
-
- /* clear pg_sel field */
- pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
- pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
-
- /* set pg_sel field */
- pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
- pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
- csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
-}
-
-static void write_paged_register(struct mobiveil_pcie *pcie,
- u32 val, u32 offset)
-{
- u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
-
- select_paged_register(pcie, offset);
- csr_writel(pcie, val, off);
-}
-
-static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
-{
- u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
-
- select_paged_register(pcie, offset);
- return csr_readl(pcie, off);
-}
-
static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
- int pci_addr, u32 type, u64 size)
+ u64 pci_addr, u32 type, u64 size)
{
- int pio_ctrl_val;
- int amap_ctrl_dw;
+ u32 value;
u64 size64 = ~(size - 1);
- if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ if (win_num >= pcie->ppio_wins) {
dev_err(&pcie->pdev->dev,
"ERROR: max inbound windows reached !\n");
return;
}
- pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
- csr_writel(pcie,
- pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
- amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
- amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
- amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+ value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
- PAB_PEX_AMAP_CTRL(win_num));
+ csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
- write_paged_register(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
- write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
- write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
- write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ pcie->ib_wins_configured++;
}
/*
* routine to program the outbound windows
*/
static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
{
-
- u32 value, type;
+ u32 value;
u64 size64 = ~(size - 1);
- if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ if (win_num >= pcie->apio_wins) {
dev_err(&pcie->pdev->dev,
"ERROR: max outbound windows reached !\n");
return;
@@ -432,28 +511,27 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- type = config_io_bit;
value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
- lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- write_paged_register(pcie, upper_32_bits(size64),
- PAB_EXT_AXI_AMAP_SIZE(win_num));
+ csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
-
- value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
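Both window programmers encode the size as ~(size - 1) split across the CTRL
and EXT size registers; a quick check of the arithmetic with an arbitrary
256 MB window:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WIN_SIZE_MASK 0xfffffc00

    int main(void)
    {
        uint64_t size = 256ULL << 20; /* 256 MB window */
        uint64_t size64 = ~(size - 1);

        /* prints "lo 0xf0000000 hi 0xffffffff" */
        printf("lo %#010" PRIx32 " hi %#010" PRIx32 "\n",
               (uint32_t)size64 & WIN_SIZE_MASK,
               (uint32_t)(size64 >> 32));
        return 0;
    }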
@@ -469,7 +547,9 @@ static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
}
+
dev_err(&pcie->pdev->dev, "link never came up\n");
+
return -ETIMEDOUT;
}
@@ -482,50 +562,55 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
msi->msi_pages_phys = (phys_addr_t)msg_addr;
writel_relaxed(lower_32_bits(msg_addr),
- pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
writel_relaxed(upper_32_bits(msg_addr),
- pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
- u32 value, pab_ctrl, type = 0;
- int err;
- struct resource_entry *win, *tmp;
-
- err = mobiveil_bringup_link(pcie);
- if (err) {
- dev_info(&pcie->pdev->dev, "link bring-up failed\n");
- return err;
- }
+ u32 value, pab_ctrl, type;
+ struct resource_entry *win;
+
+ /* setup bus numbers */
+ value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
value = csr_readl(pcie, PCI_COMMAND);
- csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER, PCI_COMMAND);
+ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
pab_ctrl = csr_readl(pcie, PAB_CTRL);
- csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
- (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
+ csr_writel(pcie, pab_ctrl, PAB_CTRL);
csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
- csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+ value |= APIO_EN_MASK;
+ csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+
+ /* Enable PCIe PIO master */
+ value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value |= 1 << PIO_ENABLE_SHIFT;
+ csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -535,32 +620,38 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
*/
/* config outbound translation window */
- program_ob_windows(pcie, pcie->ob_wins_configured,
- pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
- resource_size(pcie->ob_io_res));
+ program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
/* memory inbound translation window */
- program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+ program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
- type = 0;
+ resource_list_for_each_entry(win, &pcie->resources) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
- if (resource_type(win->res) == IORESOURCE_IO)
+ else if (resource_type(win->res) == IORESOURCE_IO)
type = IO_WINDOW_TYPE;
- if (type) {
- /* configure outbound translation window */
- program_ob_windows(pcie, pcie->ob_wins_configured,
- win->res->start, 0, type,
- resource_size(win->res));
- }
+ else
+ continue;
+
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start,
+ win->res->start - win->offset,
+ type, resource_size(win->res));
}
+ /* fixup for PCIe class register */
+ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value &= 0xff;
+ value |= (PCI_CLASS_BRIDGE_PCI << 16);
+ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
- return err;
+ return 0;
}
static void mobiveil_mask_intx_irq(struct irq_data *data)
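The bus-number setup at the top of mobiveil_host_init() packs three fields into
the dword at PCI_PRIMARY_BUS; a model of the 0x00ff0100 encoding (the preserved
top byte is the secondary latency timer, and its starting value here is
invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned int value = 0xaa000000; /* pretend latency timer is 0xaa */

        value &= 0xff000000; /* keep only the latency timer byte */
        value |= 0x00ff0100; /* subordinate 0xff, secondary 1, primary 0 */

        /* prints "primary 0 secondary 1 subordinate 255" */
        printf("primary %u secondary %u subordinate %u\n",
               value & 0xff, (value >> 8) & 0xff, (value >> 16) & 0xff);
        return 0;
    }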
@@ -574,7 +665,8 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
- csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ shifted_val &= ~mask;
+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -589,7 +681,8 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
- csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ shifted_val |= mask;
+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -603,10 +696,11 @@ static struct irq_chip intx_irq_chip = {
/* routine to setup the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+ irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
+
return 0;
}
@@ -623,7 +717,7 @@ static struct irq_chip mobiveil_msi_irq_chip = {
static struct msi_domain_info mobiveil_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ MSI_FLAG_PCI_MSIX),
.chip = &mobiveil_msi_irq_chip,
};
@@ -641,7 +735,7 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
+ const struct cpumask *mask, bool force)
{
return -EINVAL;
}
@@ -653,7 +747,8 @@ static struct irq_chip mobiveil_msi_bottom_irq_chip = {
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs, void *args)
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
struct mobiveil_pcie *pcie = domain->host_data;
struct mobiveil_msi *msi = &pcie->msi;
@@ -673,13 +768,13 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
mutex_unlock(&msi->lock);
irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
- domain->host_data, handle_level_irq,
- NULL, NULL);
+ domain->host_data, handle_level_irq, NULL, NULL);
return 0;
}
static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs)
+ unsigned int virq,
+ unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
@@ -687,12 +782,11 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
mutex_lock(&msi->lock);
- if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
d->hwirq);
- } else {
+ else
__clear_bit(d->hwirq, msi->msi_irq_in_use);
- }
mutex_unlock(&msi->lock);
}
@@ -716,12 +810,14 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info, msi->dev_domain);
+ &mobiveil_msi_domain_info,
+ msi->dev_domain);
if (!msi->msi_domain) {
dev_err(dev, "failed to create MSI domain\n");
irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
+
return 0;
}
@@ -732,12 +828,12 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
int ret;
/* setup INTx */
- pcie->intx_domain = irq_domain_add_linear(node,
- PCI_NUM_INTX, &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
+ return -ENOMEM;
}
raw_spin_lock_init(&pcie->intx_mask_lock);
@@ -763,11 +859,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
/* allocate the PCIe port */
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
- return -ENODEV;
+ return -ENOMEM;
pcie = pci_host_bridge_priv(bridge);
- if (!pcie)
- return -ENOMEM;
pcie->pdev = pdev;
@@ -784,7 +878,7 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
&pcie->resources, &iobase);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
- return -ENOMEM;
+ return ret;
}
/*
@@ -797,9 +891,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
goto error;
}
- /* fixup for PCIe class register */
- csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
-
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
@@ -807,6 +898,8 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
goto error;
}
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
ret = devm_request_pci_bus_resources(dev, &pcie->resources);
if (ret)
goto error;
@@ -820,6 +913,12 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
+ ret = mobiveil_bringup_link(pcie);
+ if (ret) {
+ dev_info(dev, "link bring-up failed\n");
+ goto error;
+ }
+
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
@@ -848,10 +947,10 @@ MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
static struct platform_driver mobiveil_pcie_driver = {
.probe = mobiveil_pcie_probe,
.driver = {
- .name = "mobiveil-pcie",
- .of_match_table = mobiveil_pcie_of_match,
- .suppress_bind_attrs = true,
- },
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
};
builtin_platform_driver(mobiveil_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 3b031f00a94a..45c0f344ccd1 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -482,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int i;
mutex_lock(&msi->lock);
- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
- nr_irqs, 0);
- if (bit >= INT_PCI_MSI_NR) {
+ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
mutex_unlock(&msi->lock);
return -ENOSPC;
}
- bitmap_set(msi->bitmap, bit, nr_irqs);
-
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
domain->host_data, handle_simple_irq,
@@ -508,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct nwl_msi *msi = &pcie->msi;
mutex_lock(&msi->lock);
- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
mutex_unlock(&msi->lock);
}
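bitmap_find_free_region() differs from the old open-coded search in that it
hands back a naturally aligned power-of-two block, which multi-MSI addressing
requires; a sketch of the rounding, with get_count_order() reimplemented in
user space for illustration:

    #include <stdio.h>

    /* user-space stand-in for the kernel's get_count_order() */
    static int count_order(unsigned int n)
    {
        int order = 0;

        while ((1u << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        /* A request for 3 vectors is rounded up to a 4-slot region, so
         * the hwirq base comes back 4-aligned. */
        printf("nr_irqs 3 -> order %d -> %d aligned slots\n",
               count_order(3), 1 << count_order(3));
        return 0;
    }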
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 999a5509e57e..4575e0c6dc4b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -627,7 +627,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
* 32-bit resources. __pci_assign_resource() enforces that
* artificial restriction to make sure everything will fit.
*
- * The only way we could use a 64-bit non-prefechable MEMBAR is
+ * The only way we could use a 64-bit non-prefetchable MEMBAR is
* if its address is <4GB so that we can convert it to a 32-bit
* resource. To be visible to the host OS, all VMD endpoints must
* be initially configured by platform BIOS, which includes setting