Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/ats.c                                  |   2
-rw-r--r--  drivers/pci/controller/Kconfig                     |   4
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                 |   2
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c            |   1
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c         |  84
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  |  12
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c       |  61
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h       |  39
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c            |   2
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c             | 115
-rw-r--r--  drivers/pci/controller/pci-aardvark.c              |   2
-rw-r--r--  drivers/pci/controller/pci-hyperv.c                |  15
-rw-r--r--  drivers/pci/controller/pci-tegra.c                 | 589
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c           |  10
-rw-r--r--  drivers/pci/controller/pcie-altera.c               |  69
-rw-r--r--  drivers/pci/controller/pcie-iproc-platform.c       |   2
-rw-r--r--  drivers/pci/controller/pcie-iproc.c                |   2
-rw-r--r--  drivers/pci/controller/pcie-mobiveil.c             | 525
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c           |  11
-rw-r--r--  drivers/pci/controller/vmd.c                       |   2
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c      |  35
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c                |   3
-rw-r--r--  drivers/pci/iov.c                                  |   2
-rw-r--r--  drivers/pci/mmap.c                                 |   2
-rw-r--r--  drivers/pci/msi.c                                  |  43
-rw-r--r--  drivers/pci/of.c                                   |   8
-rw-r--r--  drivers/pci/p2pdma.c                               |  72
-rw-r--r--  drivers/pci/pci-acpi.c                             |  14
-rw-r--r--  drivers/pci/pci-bridge-emul.c                      |   2
-rw-r--r--  drivers/pci/pci-driver.c                           | 137
-rw-r--r--  drivers/pci/pci-pf-stub.c                          |   2
-rw-r--r--  drivers/pci/pci-sysfs.c                            |   5
-rw-r--r--  drivers/pci/pci.c                                  | 122
-rw-r--r--  drivers/pci/pci.h                                  |   9
-rw-r--r--  drivers/pci/pcie/aer_inject.c                      |   2
-rw-r--r--  drivers/pci/pcie/aspm.c                            |  20
-rw-r--r--  drivers/pci/pcie/portdrv_core.c                    |  66
-rw-r--r--  drivers/pci/probe.c                                |  30
-rw-r--r--  drivers/pci/proc.c                                 |   2
-rw-r--r--  drivers/pci/quirks.c                               | 110
-rw-r--r--  drivers/pci/search.c                               |   4
-rw-r--r--  drivers/pci/setup-bus.c                            |  60
-rw-r--r--  drivers/pci/slot.c                                 |   1
-rw-r--r--  drivers/pci/switch/Kconfig                         |   2
44 files changed, 1603 insertions, 699 deletions
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 97c08146534a..e18499243f84 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
* @pdev: PCI device structure
*
* Returns negative value when PASID capability is not present.
- * Otherwise it returns the numer of supported PASIDs.
+ * Otherwise it returns the number of supported PASIDs.
*/
int pci_max_pasids(struct pci_dev *pdev)
{
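(Aside, not part of the patch: a minimal caller sketch for pci_max_pasids(),
whose kernel-doc the hunk above fixes. The "foo_probe" hook and the table
sizing are hypothetical.)

/* Bound a driver's PASID allocation with pci_max_pasids(). */
static int foo_probe(struct pci_dev *pdev)
{
	int max_pasids = pci_max_pasids(pdev);

	if (max_pasids < 0)
		return max_pasids;	/* PASID capability not present */

	/* Size internal PASID tables to at most max_pasids entries. */
	return 0;
}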
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 011c57cae4b0..fe9f9f13ce11 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -174,14 +174,14 @@ config PCIE_IPROC_MSI
PCIe controller
config PCIE_ALTERA
- bool "Altera PCIe controller"
+ tristate "Altera PCIe controller"
depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
help
Say Y here if you want to enable PCIe controller support on Altera
FPGA.
config PCIE_ALTERA_MSI
- bool "Altera PCIe MSI feature"
+ tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
depends on PCI_MSI_IRQ_DOMAIN
help
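(Aside, not part of the patch: switching these symbols from bool to tristate
permits "=m", so both drivers can now be built as loadable modules, e.g. via a
config fragment like the following.)

CONFIG_PCIE_ALTERA=m
CONFIG_PCIE_ALTERA_MSI=m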
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index a6ce1ee51b4c..6ea778ae4877 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -90,7 +90,7 @@ config PCI_EXYNOS
config PCI_IMX6
bool "Freescale i.MX6/7/8 PCIe controller"
- depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 419451efd58c..4234ddb4722f 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include "../../pci.h"
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 0c389a30ef5d..3d55dc78d999 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -25,10 +25,14 @@
#include "pcie-designware.h"
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
+ struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+ unsigned int phy_count;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -55,7 +59,7 @@ struct armada8k_pcie {
#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
- * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
*/
#define ARCACHE_DEFAULT_VALUE 0x3511
@@ -67,6 +71,76 @@ struct armada8k_pcie {
#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+ pcie->phy_count);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(pcie->phy[i]) &&
+ (PTR_ERR(pcie->phy[i]) == -EPROBE_DEFER))
+ return PTR_ERR(pcie->phy[i]);
+
+ if (IS_ERR(pcie->phy[i])) {
+ pcie->phy[i] = NULL;
+ continue;
+ }
+
+ pcie->phy_count++;
+ }
+
+ /* Old bindings miss the PHY handle, so just warn if there is no PHY */
+ if (!pcie->phy_count)
+ dev_warn(dev, "No available PHY\n");
+
+ ret = armada8k_pcie_enable_phys(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+ return ret;
+}
+
static int armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
@@ -249,14 +323,20 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail_clkreg;
}
+ ret = armada8k_pcie_setup_phys(pcie);
+ if (ret)
+ goto fail_clkreg;
+
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail_clkreg;
+ goto disable_phy;
return 0;
+disable_phy:
+ armada8k_pcie_disable_phys(pcie);
fail_clkreg:
clk_disable_unprepare(pcie->clk_reg);
fail:
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 77db32529319..f93252d0da5b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -311,6 +311,7 @@ void dw_pcie_msi_init(struct pcie_port *pp)
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
@@ -495,6 +496,16 @@ err_free_msi:
dw_pcie_free_msi(pp);
return ret;
}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
+
+void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+ pci_stop_root_bus(pp->root_bus);
+ pci_remove_root_bus(pp->root_bus);
+ if (pci_msi_enabled() && !pp->ops->msi_host_init)
+ dw_pcie_free_msi(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val,
@@ -687,3 +698,4 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
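(Aside, not part of the patch: with dw_pcie_host_init() and the new
dw_pcie_host_deinit() exported, a DWC-based host driver can be built as a
module with a working remove path. A hedged sketch follows; the "foo" name is
hypothetical, and it assumes struct dw_pcie embeds its pcie_port as "pp".)

static int foo_pcie_remove(struct platform_device *pdev)
{
	struct dw_pcie *pci = platform_get_drvdata(pdev);

	/* Tears down the root bus and frees MSI resources if used. */
	dw_pcie_host_deinit(&pci->pp);

	return 0;
}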
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 9d7c51c32b3b..7d25102c304c 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -34,6 +34,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
@@ -51,69 +52,97 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
int ret;
u32 val;
if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, base, reg, size);
+ return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
- ret = dw_pcie_read(base + reg, size, &val);
+ ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
if (ret)
dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, base, reg, size, val);
+ pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
- ret = dw_pcie_write(base + reg, size, val);
+ ret = dw_pcie_write(pci->dbi_base + reg, size, val);
if (ret)
dev_err(pci->dev, "Write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
{
int ret;
u32 val;
if (pci->ops->read_dbi2)
- return pci->ops->read_dbi2(pci, base, reg, size);
+ return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
- ret = dw_pcie_read(base + reg, size, &val);
+ ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
if (ret)
dev_err(pci->dev, "read DBI address failed\n");
return val;
}
-void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
if (pci->ops->write_dbi2) {
- pci->ops->write_dbi2(pci, base, reg, size, val);
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
return;
}
- ret = dw_pcie_write(base + reg, size, val);
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
if (ret)
dev_err(pci->dev, "write DBI address failed\n");
}
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+{
+ int ret;
+ u32 val;
+
+ if (pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+
+ ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "Read ATU address failed\n");
+
+ return val;
+}
+
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ if (pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(pci->atu_base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write ATU address failed\n");
+}
+
static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index b8993f2b78df..ffed084a0b4f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -254,14 +254,12 @@ struct dw_pcie {
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val);
-u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size);
-void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val);
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
@@ -275,52 +273,52 @@ void dw_pcie_setup(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+ dw_pcie_write_dbi(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+ return dw_pcie_read_dbi(pci, reg, 0x4);
}
static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+ dw_pcie_write_dbi(pci, reg, 0x2, val);
}
static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+ return dw_pcie_read_dbi(pci, reg, 0x2);
}
static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+ dw_pcie_write_dbi(pci, reg, 0x1, val);
}
static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+ return dw_pcie_read_dbi(pci, reg, 0x1);
}
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val);
+ dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4);
+ return dw_pcie_read_dbi2(pci, reg, 0x4);
}
static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+ dw_pcie_write_atu(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+ return dw_pcie_read_atu(pci, reg, 0x4);
}
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
@@ -351,6 +349,7 @@ void dw_pcie_msi_init(struct pcie_port *pp);
void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
+void dw_pcie_host_deinit(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
@@ -375,6 +374,10 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
return 0;
}
+static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+}
+
static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 9b599296205d..8df1914226be 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -2,7 +2,7 @@
/*
* PCIe host controller driver for Kirin Phone SoCs
*
- * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
* http://www.huawei.com
*
* Author: Xiaowei Song <songxiaowei@huawei.com>
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 0ed235d560e3..7e581748ee9f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -112,10 +112,10 @@ struct qcom_pcie_resources_2_3_2 {
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
struct qcom_pcie_resources_2_4_0 {
- struct clk *aux_clk;
- struct clk *master_clk;
- struct clk *slave_clk;
+ struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ int num_clks;
struct reset_control *axi_m_reset;
struct reset_control *axi_s_reset;
struct reset_control *pipe_reset;
@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(100);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -638,18 +640,20 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+ int ret;
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
+ res->clks[0].id = "aux";
+ res->clks[1].id = "master_bus";
+ res->clks[2].id = "slave_bus";
+ res->clks[3].id = "iface";
- res->master_clk = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_clk))
- return PTR_ERR(res->master_clk);
+ /* qcom,pcie-ipq4019 is defined without "iface" */
+ res->num_clks = is_ipq ? 3 : 4;
- res->slave_clk = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_clk))
- return PTR_ERR(res->slave_clk);
+ ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ if (ret < 0)
+ return ret;
res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
if (IS_ERR(res->axi_m_reset))
@@ -659,27 +663,33 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->axi_s_reset))
return PTR_ERR(res->axi_s_reset);
- res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(res->pipe_reset))
- return PTR_ERR(res->pipe_reset);
-
- res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_vmid");
- if (IS_ERR(res->axi_m_vmid_reset))
- return PTR_ERR(res->axi_m_vmid_reset);
-
- res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
- "axi_s_xpu");
- if (IS_ERR(res->axi_s_xpu_reset))
- return PTR_ERR(res->axi_s_xpu_reset);
-
- res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
- if (IS_ERR(res->parf_reset))
- return PTR_ERR(res->parf_reset);
-
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- if (IS_ERR(res->phy_reset))
- return PTR_ERR(res->phy_reset);
+ if (is_ipq) {
+ /*
+ * These resources relate to the PHY or are secure clocks, but
+ * are controlled here for IPQ4019
+ */
+ res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+ "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+ "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+ }
res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
"axi_m_sticky");
@@ -699,9 +709,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
if (IS_ERR(res->ahb_reset))
return PTR_ERR(res->ahb_reset);
- res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
- if (IS_ERR(res->phy_ahb_reset))
- return PTR_ERR(res->phy_ahb_reset);
+ if (is_ipq) {
+ res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+ }
return 0;
}
@@ -719,9 +731,7 @@ static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
reset_control_assert(res->axi_m_sticky_reset);
reset_control_assert(res->pwr_reset);
reset_control_assert(res->ahb_reset);
- clk_disable_unprepare(res->aux_clk);
- clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->slave_clk);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
@@ -850,23 +860,9 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
usleep_range(10000, 12000);
- ret = clk_prepare_enable(res->aux_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_clk_aux;
- }
-
- ret = clk_prepare_enable(res->master_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_axi_m;
- }
-
- ret = clk_prepare_enable(res->slave_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_axi_s;
- }
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret)
+ goto err_clks;
/* enable PCIe clocks and resets */
val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
@@ -891,11 +887,7 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
return 0;
-err_clk_axi_s:
- clk_disable_unprepare(res->master_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->aux_clk);
-err_clk_aux:
+err_clks:
reset_control_assert(res->ahb_reset);
err_rst_ahb:
reset_control_assert(res->pwr_reset);
@@ -1289,6 +1281,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
{ }
};
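(Aside, not part of the patch: the clock handling above follows the standard
clk_bulk pattern. A condensed sketch, with the hypothetical helper name
"foo_enable_clks"; the clock IDs come from the hunk above.)

static int foo_enable_clks(struct device *dev, struct clk_bulk_data *clks,
			   int num_clks)
{
	int ret;

	/* Looks up every clks[i].id at once; fails if any is missing. */
	ret = devm_clk_bulk_get(dev, num_clks, clks);
	if (ret < 0)
		return ret;

	/* Prepares/enables all clocks, rolling back on partial failure. */
	return clk_bulk_prepare_enable(num_clks, clks);
}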
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 134e0306ff00..fc0fe4d4de49 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -308,7 +308,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
- /* Unmask all MSI's */
+ /* Unmask all MSIs */
advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 82acd6155adf..40b625458afa 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
static void hv_eject_device_work(struct work_struct *work)
{
struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
struct pci_dev *pdev;
unsigned long flags;
@@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work)
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
WARN_ON(hpdev->state != hv_pcichild_ejecting);
@@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work)
* because hbus->pci_bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
- wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work)
pci_unlock_rescan_remove();
}
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
if (hpdev->pci_slot)
pci_destroy_slot(hpdev->pci_slot);
@@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct work_struct *work)
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
@@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct work_struct *work)
/* For the two refs got in new_pcichild_device() */
put_pcichild(hpdev);
put_pcichild(hpdev);
- put_hvpcibus(hpdev->hbus);
+ /* hpdev has been freed. Do not use it any more. */
+
+ put_hvpcibus(hbus);
}
/**
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 464ba2538d52..9a917b2456f6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
@@ -30,6 +31,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -95,7 +97,8 @@
#define AFI_MSI_EN_VEC7 0xa8
#define AFI_CONFIGURATION 0xac
-#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+#define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31)
#define AFI_FPCI_ERROR_MASKS 0xb0
@@ -159,13 +162,14 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29))
+#define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
-#define AFI_PEX2_CTRL 0x128
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
@@ -177,20 +181,74 @@
#define AFI_PEXBIAS_CTRL_0 0x168
+#define RP_PRIV_XP_DL 0x00000494
+#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
+
+#define RP_RX_HDR_LIMIT 0x00000e00
+#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
+#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
+
+#define RP_ECTL_2_R1 0x00000e84
+#define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R1 0x00000e8c
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R1 0x00000e90
+#define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R1 0x00000e94
+#define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
+#define RP_ECTL_2_R2 0x00000ea4
+#define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff
+
+#define RP_ECTL_4_R2 0x00000eac
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16)
+#define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16
+
+#define RP_ECTL_5_R2 0x00000eb0
+#define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff
+
+#define RP_ECTL_6_R2 0x00000eb4
+#define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff
+
#define RP_VEND_XP 0x00000f00
-#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
+#define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
+#define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18)
+
+#define RP_VEND_CTL0 0x00000f44
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12)
+#define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
+
+#define RP_VEND_CTL1 0x00000f48
+#define RP_VEND_CTL1_ERPT (1 << 13)
+
+#define RP_VEND_XP_BIST 0x00000f4c
+#define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
#define RP_VEND_CTL2 0x00000fa8
#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
#define RP_PRIV_MISC 0x00000fe0
-#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
-#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16)
+#define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24)
+#define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+#define RP_LINK_CONTROL_STATUS_2 0x000000b0
+
#define PADS_CTL_SEL 0x0000009c
#define PADS_CTL 0x000000a0
@@ -226,6 +284,7 @@
#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
#define PME_ACK_TIMEOUT 10000
+#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
struct tegra_msi {
struct msi_controller chip;
@@ -249,10 +308,12 @@ struct tegra_pcie_soc {
unsigned int num_ports;
const struct tegra_pcie_port_soc *ports;
unsigned int msi_base_shift;
+ unsigned long afi_pex2_ctrl;
u32 pads_pll_ctl;
u32 tx_ref_sel;
u32 pads_refclk_cfg0;
u32 pads_refclk_cfg1;
+ u32 update_fc_threshold;
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
@@ -260,6 +321,24 @@ struct tegra_pcie_soc {
bool has_gen2;
bool force_pca_enable;
bool program_uphy;
+ bool update_clamp_threshold;
+ bool program_deskew_time;
+ bool raw_violation_fixup;
+ bool update_fc_timer;
+ bool has_cache_bars;
+ struct {
+ struct {
+ u32 rp_ectl_2_r1;
+ u32 rp_ectl_4_r1;
+ u32 rp_ectl_5_r1;
+ u32 rp_ectl_6_r1;
+ u32 rp_ectl_2_r2;
+ u32 rp_ectl_4_r2;
+ u32 rp_ectl_5_r2;
+ u32 rp_ectl_6_r2;
+ } regs;
+ bool enable;
+ } ectl;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -321,6 +400,8 @@ struct tegra_pcie_port {
unsigned int lanes;
struct phy **phys;
+
+ struct gpio_desc *reset_gpio;
};
struct tegra_pcie_bus {
@@ -440,6 +521,7 @@ static struct pci_ops tegra_pcie_ops = {
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long ret = 0;
switch (port->index) {
@@ -452,7 +534,7 @@ static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
break;
case 2:
- ret = AFI_PEX2_CTRL;
+ ret = soc->afi_pex2_ctrl;
break;
}
@@ -465,15 +547,162 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
unsigned long value;
/* pulse reset signal */
- value = afi_readl(port->pcie, ctrl);
- value &= ~AFI_PEX_CTRL_RST;
- afi_writel(port->pcie, value, ctrl);
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 1);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
usleep_range(1000, 2000);
- value = afi_readl(port->pcie, ctrl);
- value |= AFI_PEX_CTRL_RST;
- afi_writel(port->pcie, value, ctrl);
+ if (port->reset_gpio) {
+ gpiod_set_value(port->reset_gpio, 0);
+ } else {
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+ }
+}
+
+static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /* Enable AER capability */
+ value = readl(port->base + RP_VEND_CTL1);
+ value |= RP_VEND_CTL1_ERPT;
+ writel(value, port->base + RP_VEND_CTL1);
+
+ /* Optimal settings to enhance bandwidth */
+ value = readl(port->base + RP_VEND_XP);
+ value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
+ value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
+ writel(value, port->base + RP_VEND_XP);
+
+ /*
+ * LTSSM will wait for DLLP to finish before entering L1 or L2,
+ * to avoid truncation of PM messages, which results in receiver errors
+ */
+ value = readl(port->base + RP_VEND_XP_BIST);
+ value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
+ writel(value, port->base + RP_VEND_XP_BIST);
+
+ value = readl(port->base + RP_PRIV_MISC);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
+ value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
+
+ if (soc->update_clamp_threshold) {
+ value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
+ value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
+ RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
+ }
+
+ writel(value, port->base + RP_PRIV_MISC);
+}
+
+static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ value = readl(port->base + RP_ECTL_2_R1);
+ value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r1;
+ writel(value, port->base + RP_ECTL_2_R1);
+
+ value = readl(port->base + RP_ECTL_4_R1);
+ value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r1 <<
+ RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R1);
+
+ value = readl(port->base + RP_ECTL_5_R1);
+ value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r1;
+ writel(value, port->base + RP_ECTL_5_R1);
+
+ value = readl(port->base + RP_ECTL_6_R1);
+ value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r1;
+ writel(value, port->base + RP_ECTL_6_R1);
+
+ value = readl(port->base + RP_ECTL_2_R2);
+ value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_2_r2;
+ writel(value, port->base + RP_ECTL_2_R2);
+
+ value = readl(port->base + RP_ECTL_4_R2);
+ value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_4_r2 <<
+ RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
+ writel(value, port->base + RP_ECTL_4_R2);
+
+ value = readl(port->base + RP_ECTL_5_R2);
+ value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_5_r2;
+ writel(value, port->base + RP_ECTL_5_R2);
+
+ value = readl(port->base + RP_ECTL_6_R2);
+ value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
+ value |= soc->ectl.regs.rp_ectl_6_r2;
+ writel(value, port->base + RP_ECTL_6_R2);
+}
+
+static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
+ u32 value;
+
+ /*
+ * Sometimes a link speed change from Gen2 to Gen1 fails due to
+ * instability in deskew logic on lane-0. Increase the deskew
+ * retry time to resolve this issue.
+ */
+ if (soc->program_deskew_time) {
+ value = readl(port->base + RP_VEND_CTL0);
+ value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
+ value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
+ writel(value, port->base + RP_VEND_CTL0);
+ }
+
+ /* Fixup for read after write violation. */
+ if (soc->raw_violation_fixup) {
+ value = readl(port->base + RP_RX_HDR_LIMIT);
+ value &= ~RP_RX_HDR_LIMIT_PW_MASK;
+ value |= RP_RX_HDR_LIMIT_PW;
+ writel(value, port->base + RP_RX_HDR_LIMIT);
+
+ value = readl(port->base + RP_PRIV_XP_DL);
+ value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
+ writel(value, port->base + RP_PRIV_XP_DL);
+
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+ value |= soc->update_fc_threshold;
+ writel(value, port->base + RP_VEND_XP);
+ }
+
+ if (soc->update_fc_timer) {
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+ value |= soc->update_fc_threshold;
+ writel(value, port->base + RP_VEND_XP);
+ }
+
+ /*
+ * The PCIe link doesn't come up with a few legacy PCIe endpoints
+ * if the root port advertises both Gen-1 and Gen-2 speeds in
+ * Tegra. Hence, the strategy followed here is to initially
+ * advertise only Gen-1 and, after the link is up, retrain the
+ * link to Gen-2 speed.
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_2_5GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
@@ -500,6 +729,13 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
value |= RP_VEND_CTL2_PCA_ENABLE;
writel(value, port->base + RP_VEND_CTL2);
}
+
+ tegra_pcie_enable_rp_features(port);
+
+ if (soc->ectl.enable)
+ tegra_pcie_program_ectl_settings(port);
+
+ tegra_pcie_apply_sw_fixup(port);
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -521,6 +757,12 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
value &= ~AFI_PEX_CTRL_REFCLK_EN;
afi_writel(port->pcie, value, ctrl);
+
+ /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
+ value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
@@ -545,12 +787,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
-/* Tegra PCIE requires relaxed ordering */
+/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
@@ -635,7 +880,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* do not pollute kernel log with master abort reports since they
* happen a lot during enumeration
*/
- if (code == AFI_INTR_MASTER_ABORT)
+ if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
@@ -704,11 +949,13 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
afi_writel(pcie, 0, AFI_FPCI_BAR5);
- /* map all upstream transactions as uncached */
- afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
- afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
- afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
- afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+ if (pcie->soc->has_cache_bars) {
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+ }
/* MSI translations are setup only when needed */
afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
@@ -852,7 +1099,6 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@@ -878,12 +1124,6 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
}
}
- /* Configure the reference clock driver */
- pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
-
- if (soc->num_ports > 2)
- pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
-
return 0;
}
@@ -918,13 +1158,11 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
return 0;
}
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
- struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
- int err;
/* enable PLL power down */
if (pcie->phy) {
@@ -942,9 +1180,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
value = afi_readl(pcie, AFI_PCIE_CONFIG);
value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+ value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
- list_for_each_entry(port, &pcie->ports, list)
+ list_for_each_entry(port, &pcie->ports, list) {
value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+ value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+ }
afi_writel(pcie, value, AFI_PCIE_CONFIG);
@@ -958,20 +1199,10 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- if (soc->program_uphy) {
- err = tegra_pcie_phy_power_on(pcie);
- if (err < 0) {
- dev_err(dev, "failed to power on PHY(s): %d\n", err);
- return err;
- }
- }
-
- /* take the PCIe interface module out of reset */
- reset_control_deassert(pcie->pcie_xrst);
-
- /* finally enable PCIe */
+ /* Disable AFI dynamic clock gating and enable PCIe */
value = afi_readl(pcie, AFI_CONFIGURATION);
value |= AFI_CONFIGURATION_EN_FPCI;
+ value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
afi_writel(pcie, value, AFI_CONFIGURATION);
value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
@@ -989,22 +1220,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
/* disable all exceptions */
afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
-
- return 0;
-}
-
-static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
-{
- int err;
-
- reset_control_assert(pcie->pcie_xrst);
-
- if (pcie->soc->program_uphy) {
- err = tegra_pcie_phy_power_off(pcie);
- if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
- err);
- }
}
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
@@ -1014,13 +1229,11 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
int err;
reset_control_assert(pcie->afi_rst);
- reset_control_assert(pcie->pex_rst);
clk_disable_unprepare(pcie->pll_e);
if (soc->has_cml_clk)
clk_disable_unprepare(pcie->cml_clk);
clk_disable_unprepare(pcie->afi_clk);
- clk_disable_unprepare(pcie->pex_clk);
if (!dev->pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
@@ -1048,46 +1261,66 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
if (err < 0)
dev_err(dev, "failed to enable regulators: %d\n", err);
- if (dev->pm_domain) {
- err = clk_prepare_enable(pcie->pex_clk);
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
if (err) {
- dev_err(dev, "failed to enable PEX clock: %d\n", err);
- return err;
+ dev_err(dev, "failed to power ungate: %d\n", err);
+ goto regulator_disable;
}
- reset_control_deassert(pcie->pex_rst);
- } else {
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk,
- pcie->pex_rst);
+ err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
if (err) {
- dev_err(dev, "powerup sequence failed: %d\n", err);
- return err;
+ dev_err(dev, "failed to remove clamp: %d\n", err);
+ goto powergate;
}
}
- reset_control_deassert(pcie->afi_rst);
-
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
dev_err(dev, "failed to enable AFI clock: %d\n", err);
- return err;
+ goto powergate;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
dev_err(dev, "failed to enable CML clock: %d\n", err);
- return err;
+ goto disable_afi_clk;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
dev_err(dev, "failed to enable PLLE clock: %d\n", err);
- return err;
+ goto disable_cml_clk;
}
+ reset_control_deassert(pcie->afi_rst);
+
return 0;
+
+disable_cml_clk:
+ if (soc->has_cml_clk)
+ clk_disable_unprepare(pcie->cml_clk);
+disable_afi_clk:
+ clk_disable_unprepare(pcie->afi_clk);
+powergate:
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+regulator_disable:
+ regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+
+ return err;
+}
+
+static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc *soc = pcie->soc;
+
+ /* Configure the reference clock driver */
+ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
@@ -1647,6 +1880,15 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
return 0;
}
+static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
+{
+ u32 value;
+
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_INT_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+}
+
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
@@ -1990,6 +2232,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
+ char *label;
err = of_pci_get_devfn(port);
if (err < 0) {
@@ -2048,6 +2291,31 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
+ label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+ if (!label) {
+ dev_err(dev, "failed to create reset GPIO label\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Returns -ENOENT if the reset-gpios property is not
+ * populated; in that case, fall back to using the AFI
+ * per-port register to toggle the PERST# SFIO line.
+ */
+ rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
+ "reset-gpios", 0,
+ GPIOD_OUT_LOW,
+ label);
+ if (IS_ERR(rp->reset_gpio)) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ rp->reset_gpio = NULL;
+ } else {
+ dev_err(dev, "failed to get reset GPIO: %d\n",
+ err);
+ return PTR_ERR(rp->reset_gpio);
+ }
+ }
+
list_add_tail(&rp->list, &pcie->ports);
}
@@ -2095,7 +2363,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
- dev_err(dev, "link %u down, retrying\n", port->index);
+ dev_dbg(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@@ -2117,6 +2385,64 @@ retry:
return false;
}
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct tegra_pcie_port *port;
+ ktime_t deadline;
+ u32 value;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ /*
+ * "Supported Link Speeds Vector" in "Link Capabilities 2"
+ * is not supported by Tegra. tegra_pcie_change_link_speed()
+ * is called only for Tegra chips which support Gen2.
+ * So there is no harm if the supported link speed is not verified.
+ */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+ value &= ~PCI_EXP_LNKSTA_CLS;
+ value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+ /*
+ * Poll until link comes back from recovery to avoid race
+ * condition.
+ */
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_warn(dev, "PCIe port %u link is in recovery\n",
+ port->index);
+
+ /* Retrain the link */
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ value |= PCI_EXP_LNKCTL_RL;
+ writel(value, port->base + RP_LINK_CONTROL_STATUS);
+
+ deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+ while (ktime_before(ktime_get(), deadline)) {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+ if ((value & PCI_EXP_LNKSTA_LT) == 0)
+ break;
+
+ usleep_range(2000, 3000);
+ }
+
+ if (value & PCI_EXP_LNKSTA_LT)
+ dev_err(dev, "failed to retrain link of port %u\n",
+ port->index);
+ }
+}
+
static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -2127,7 +2453,12 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
port->index, port->lanes);
tegra_pcie_port_enable(port);
+ }
+ /* Start LTSSM from Tegra side */
+ reset_control_deassert(pcie->pcie_xrst);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
if (tegra_pcie_port_check_link(port))
continue;
@@ -2136,12 +2467,17 @@ static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
}
+
+ if (pcie->soc->has_gen2)
+ tegra_pcie_change_link_speed(pcie);
}
static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
{
struct tegra_pcie_port *port, *tmp;
+ reset_control_assert(pcie->pcie_xrst);
+
list_for_each_entry_safe(port, tmp, &pcie->ports, list)
tegra_pcie_port_disable(port);
}
@@ -2155,6 +2491,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.ports = tegra20_pcie_ports,
.msi_base_shift = 0,
+ .afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2165,6 +2502,12 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_gen2 = false,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = true,
+ .ectl.enable = false,
};
static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
@@ -2188,6 +2531,12 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_gen2 = false,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2197,6 +2546,8 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x44ac44ac,
+ /* FC threshold is bits [25:18] */
+ .update_fc_threshold = 0x03fc0000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2204,6 +2555,12 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_gen2 = true,
.force_pca_enable = false,
.program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = false,
+ .raw_violation_fixup = true,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2213,6 +2570,8 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x90b890b8,
+ /* FC threshold is bits [25:18] */
+ .update_fc_threshold = 0x01800000,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2220,6 +2579,24 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.has_gen2 = true,
.force_pca_enable = true,
.program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = true,
+ .raw_violation_fixup = false,
+ .update_fc_timer = true,
+ .has_cache_bars = false,
+ .ectl = {
+ .regs = {
+ .rp_ectl_2_r1 = 0x0000000f,
+ .rp_ectl_4_r1 = 0x00000067,
+ .rp_ectl_5_r1 = 0x55010000,
+ .rp_ectl_6_r1 = 0x00000001,
+ .rp_ectl_2_r2 = 0x0000008f,
+ .rp_ectl_4_r2 = 0x000000c7,
+ .rp_ectl_5_r2 = 0x55010000,
+ .rp_ectl_6_r2 = 0x00000001,
+ },
+ .enable = true,
+ },
};
static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
@@ -2232,6 +2609,7 @@ static const struct tegra_pcie_soc tegra186_pcie = {
.num_ports = 3,
.ports = tegra186_pcie_ports,
.msi_base_shift = 8,
+ .afi_pex2_ctrl = 0x19c,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0x80b880b8,
@@ -2243,6 +2621,12 @@ static const struct tegra_pcie_soc tegra186_pcie = {
.has_gen2 = true,
.force_pca_enable = false,
.program_uphy = false,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+ .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
};
static const struct of_device_id tegra_pcie_of_match[] = {
@@ -2485,16 +2869,32 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
struct tegra_pcie *pcie = dev_get_drvdata(dev);
struct tegra_pcie_port *port;
+ int err;
list_for_each_entry(port, &pcie->ports, list)
tegra_pcie_pme_turnoff(port);
tegra_pcie_disable_ports(pcie);
+ /*
+ * AFI_INTR is unmasked in tegra_pcie_enable_controller(); mask it to
+ * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
+ */
+ tegra_pcie_disable_interrupts(pcie);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
+
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_disable_msi(pcie);
- tegra_pcie_disable_controller(pcie);
+ pinctrl_pm_select_idle_state(dev);
tegra_pcie_power_off(pcie);
return 0;
@@ -2510,20 +2910,45 @@ static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
dev_err(dev, "tegra pcie power on fail: %d\n", err);
return err;
}
- err = tegra_pcie_enable_controller(pcie);
- if (err) {
- dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
goto poweroff;
}
+
+ tegra_pcie_enable_controller(pcie);
tegra_pcie_setup_translations(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_enable_msi(pcie);
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ goto pex_dpd_enable;
+ }
+
+ reset_control_deassert(pcie->pex_rst);
+
+ if (pcie->soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ goto disable_pex_clk;
+ }
+ }
+
+ tegra_pcie_apply_pad_settings(pcie);
tegra_pcie_enable_ports(pcie);
return 0;
+disable_pex_clk:
+ reset_control_assert(pcie->pex_rst);
+ clk_disable_unprepare(pcie->pex_clk);
+pex_dpd_enable:
+ pinctrl_pm_select_idle_state(dev);
poweroff:
tegra_pcie_power_off(pcie);
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 025ef7d9a046..16d938920ca5 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -288,4 +289,13 @@ static int __init altera_msi_init(void)
{
return platform_driver_register(&altera_msi_driver);
}
+
+static void __exit altera_msi_exit(void)
+{
+ platform_driver_unregister(&altera_msi_driver);
+}
+
subsys_initcall(altera_msi_init);
+MODULE_DEVICE_TABLE(of, altera_msi_of_match);
+module_exit(altera_msi_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 27edcebd1726..d2497ca43828 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -43,6 +44,8 @@
#define S10_RP_RXCPL_STATUS 0x200C
#define S10_RP_CFG_ADDR(pcie, reg) \
(((pcie)->hip_base) + (reg) + (1 << 20))
+#define S10_RP_SECONDARY(pcie) \
+ readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
@@ -54,14 +57,9 @@
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
-#define TLP_CFGRD_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \
- : pcie->pcie_data->cfgrd1) << 24) | \
- TLP_PAYLOAD_SIZE)
-#define TLP_CFGWR_DW0(pcie, bus) \
- ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgwr0 \
- : pcie->pcie_data->cfgwr1) << 24) | \
- TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW0(pcie, cfg) \
+ (((cfg) << 24) | \
+ TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
@@ -321,14 +319,31 @@ static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
s10_tlp_write_tx(pcie, data, RP_TX_EOP);
}
+static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
+ int where, u8 byte_en, bool read, u32 *headers)
+{
+ u8 cfg;
+ u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
+ u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
+ u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;
+
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1)
+ cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
+ else
+ cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;
+
+ headers[0] = TLP_CFG_DW0(pcie, cfg);
+ headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
+ headers[2] = TLP_CFG_DW2(bus, devfn, where);
+}
+
static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
int where, u8 byte_en, u32 *value)
{
u32 headers[TLP_HDR_SIZE];
- headers[0] = TLP_CFGRD_DW0(pcie, bus);
- headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
- headers[2] = TLP_CFG_DW2(bus, devfn, where);
+ get_tlp_header(pcie, bus, devfn, where, byte_en, true,
+ headers);
pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
@@ -341,9 +356,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
- headers[0] = TLP_CFGWR_DW0(pcie, bus);
- headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
- headers[2] = TLP_CFG_DW2(bus, devfn, where);
+ get_tlp_header(pcie, bus, devfn, where, byte_en, false,
+ headers);
/* check alignment to Qword */
if ((where & 0x7) == 0)
@@ -705,6 +719,13 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
return 0;
}
+static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_domain_remove(pcie->irq_domain);
+ irq_dispose_mapping(pcie->irq);
+}
+
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@@ -798,6 +819,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
match = of_match_device(altera_pcie_of_match, &pdev->dev);
if (!match)
@@ -855,13 +877,28 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
+static int altera_pcie_remove(struct platform_device *pdev)
+{
+ struct altera_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_free_resource_list(&pcie->resources);
+ altera_pcie_irq_teardown(pcie);
+
+ return 0;
+}
+
static struct platform_driver altera_pcie_driver = {
.probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
.name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
- .suppress_bind_attrs = true,
},
};
-builtin_platform_driver(altera_pcie_driver);
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+module_platform_driver(altera_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index f30f5f3fb5c1..5a3550b6bb29 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -87,7 +87,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
/*
* DT nodes are not used by all platforms that use the iProc PCIe
- * core driver. For platforms that require explict inbound mapping
+ * core driver. For platforms that require explicit inbound mapping
* configuration, "dma-ranges" would have been present in DT
*/
pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index e3ca46497470..2d457bfdaf66 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -163,7 +163,7 @@ enum iproc_pcie_ib_map_type {
* @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
* SZ_1G
* @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
- * GB, depedning on the size unit
+ * GB, depending on the size unit
* @nr_sizes: number of supported inbound mapping region sizes
* @nr_windows: number of supported inbound mapping windows for the region
* @imap_addr_offset: register offset between the upper and lower 32-bit
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index 77052a0712d0..672e633601c7 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -31,56 +31,61 @@
* translation tables are grouped into windows; the registers of each window
* are grouped into blocks of 4 or 16 registers
*/
-#define PAB_REG_BLOCK_SIZE 16
-#define PAB_EXT_REG_BLOCK_SIZE 4
+#define PAB_REG_BLOCK_SIZE 16
+#define PAB_EXT_REG_BLOCK_SIZE 4
-#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
-#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+#define PAB_REG_ADDR(offset, win) \
+ (offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win) \
+ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
-#define LTSSM_STATUS 0x0404
-#define LTSSM_STATUS_L0_MASK 0x3f
-#define LTSSM_STATUS_L0 0x2d
+#define LTSSM_STATUS 0x0404
+#define LTSSM_STATUS_L0_MASK 0x3f
+#define LTSSM_STATUS_L0 0x2d
-#define PAB_CTRL 0x0808
-#define AMBA_PIO_ENABLE_SHIFT 0
-#define PEX_PIO_ENABLE_SHIFT 1
-#define PAGE_SEL_SHIFT 13
-#define PAGE_SEL_MASK 0x3f
-#define PAGE_LO_MASK 0x3ff
-#define PAGE_SEL_EN 0xc00
-#define PAGE_SEL_OFFSET_SHIFT 10
+#define PAB_CTRL 0x0808
+#define AMBA_PIO_ENABLE_SHIFT 0
+#define PEX_PIO_ENABLE_SHIFT 1
+#define PAGE_SEL_SHIFT 13
+#define PAGE_SEL_MASK 0x3f
+#define PAGE_LO_MASK 0x3ff
+#define PAGE_SEL_OFFSET_SHIFT 10
-#define PAB_AXI_PIO_CTRL 0x0840
-#define APIO_EN_MASK 0xf
+#define PAB_AXI_PIO_CTRL 0x0840
+#define APIO_EN_MASK 0xf
-#define PAB_PEX_PIO_CTRL 0x08c0
-#define PIO_ENABLE_SHIFT 0
+#define PAB_PEX_PIO_CTRL 0x08c0
+#define PIO_ENABLE_SHIFT 0
#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
-#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
+#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
#define PAB_INTP_INTX_MASK 0x01e0
#define PAB_INTP_MSI_MASK 0x8
-#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
-#define WIN_ENABLE_SHIFT 0
-#define WIN_TYPE_SHIFT 1
+#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
+#define WIN_ENABLE_SHIFT 0
+#define WIN_TYPE_SHIFT 1
+#define WIN_TYPE_MASK 0x3
+#define WIN_SIZE_MASK 0xfffffc00
#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
#define AXI_WINDOW_ALIGN_MASK 3
#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
-#define PAB_BUS_SHIFT 24
-#define PAB_DEVICE_SHIFT 19
-#define PAB_FUNCTION_SHIFT 16
+#define PAB_BUS_SHIFT 24
+#define PAB_DEVICE_SHIFT 19
+#define PAB_FUNCTION_SHIFT 16
#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
#define PAB_INTP_AXI_PIO_CLASS 0x474
-#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
-#define AMAP_CTRL_EN_SHIFT 0
-#define AMAP_CTRL_TYPE_SHIFT 1
+#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
+#define AMAP_CTRL_EN_SHIFT 0
+#define AMAP_CTRL_TYPE_SHIFT 1
+#define AMAP_CTRL_TYPE_MASK 3
#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
@@ -88,34 +93,40 @@
#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
/* starting offset of INTX bits in status register */
-#define PAB_INTX_START 5
+#define PAB_INTX_START 5
/* supported number of MSI interrupts */
-#define PCI_NUM_MSI 16
+#define PCI_NUM_MSI 16
/* MSI registers */
-#define MSI_BASE_LO_OFFSET 0x04
-#define MSI_BASE_HI_OFFSET 0x08
-#define MSI_SIZE_OFFSET 0x0c
-#define MSI_ENABLE_OFFSET 0x14
-#define MSI_STATUS_OFFSET 0x18
-#define MSI_DATA_OFFSET 0x20
-#define MSI_ADDR_L_OFFSET 0x24
-#define MSI_ADDR_H_OFFSET 0x28
+#define MSI_BASE_LO_OFFSET 0x04
+#define MSI_BASE_HI_OFFSET 0x08
+#define MSI_SIZE_OFFSET 0x0c
+#define MSI_ENABLE_OFFSET 0x14
+#define MSI_STATUS_OFFSET 0x18
+#define MSI_DATA_OFFSET 0x20
+#define MSI_ADDR_L_OFFSET 0x24
+#define MSI_ADDR_H_OFFSET 0x28
/* outbound and inbound window definitions */
-#define WIN_NUM_0 0
-#define WIN_NUM_1 1
-#define CFG_WINDOW_TYPE 0
-#define IO_WINDOW_TYPE 1
-#define MEM_WINDOW_TYPE 2
-#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
-#define MAX_PIO_WINDOWS 8
+#define WIN_NUM_0 0
+#define WIN_NUM_1 1
+#define CFG_WINDOW_TYPE 0
+#define IO_WINDOW_TYPE 1
+#define MEM_WINDOW_TYPE 2
+#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS 8
/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_MIN 90000
-#define LINK_WAIT_MAX 100000
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_MIN 90000
+#define LINK_WAIT_MAX 100000
+
+#define PAGED_ADDR_BNDRY 0xc00
+#define OFFSET_TO_PAGE_ADDR(off) \
+ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off) \
+ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
@@ -145,15 +156,119 @@ struct mobiveil_pcie {
struct mobiveil_msi msi;
};
-static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
- const u32 reg)
+/*
+ * mobiveil_pcie_sel_page - routine to access paged registers
+ *
+ * Registers whose addresses are greater than PAGED_ADDR_BNDRY (0xc00) are
+ * paged. For this scheme to work, the upper 6 bits of the offset are
+ * written to the pg_sel field of the PAB_CTRL register and the lower 10
+ * bits, ORed with PAGED_ADDR_BNDRY, are used as the register offset.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
+{
+ if (off < PAGED_ADDR_BNDRY) {
+ /* For directly accessed registers, clear the pg_sel field */
+ mobiveil_pcie_sel_page(pcie, 0);
+ return pcie->csr_axi_slave_base + off;
+ }
+
+ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
{
- writel_relaxed(value, pcie->csr_axi_slave_base + reg);
+ if ((uintptr_t)addr & (size - 1)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ switch (size) {
+ case 4:
+ *val = readl(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ case 1:
+ *val = readb(addr);
+ break;
+ default:
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
}
-static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
{
- return readl_relaxed(pcie->csr_axi_slave_base + reg);
+ if ((uintptr_t)addr & (size - 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ switch (size) {
+ case 4:
+ writel(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ case 1:
+ writeb(val, addr);
+ break;
+ default:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+ void *addr;
+ u32 val;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_read(addr, size, &val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+ return val;
+}
+
+static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+{
+ void *addr;
+ int ret;
+
+ addr = mobiveil_pcie_comp_addr(pcie, off);
+
+ ret = mobiveil_pcie_write(addr, size, val);
+ if (ret)
+ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
+
+static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+ return csr_read(pcie, off, 0x4);
+}
+
+static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+ csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
@@ -174,7 +289,7 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
* Do not read more than one device on the bus directly
* attached to RC
*/
- if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
+ if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
return false;
return true;
@@ -185,17 +300,17 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
* root port or endpoint
*/
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
+ unsigned int devfn, int where)
{
struct mobiveil_pcie *pcie = bus->sysdata;
+ u32 value;
if (!mobiveil_pcie_valid_device(bus, devfn))
return NULL;
- if (bus->number == pcie->root_bus_nr) {
- /* RC config access */
+ /* RC config access */
+ if (bus->number == pcie->root_bus_nr)
return pcie->csr_axi_slave_base + where;
- }
/*
* EP config access (in Config/APIO space)
@@ -203,10 +318,12 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
* (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
* Relies on pci_lock serialization
*/
- csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
- PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
- PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
- PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ value = bus->number << PAB_BUS_SHIFT |
+ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
+
+ csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+
return pcie->config_axi_slave_base + where;
}
@@ -241,24 +358,29 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >>
- PAB_INTX_START;
+ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
do {
for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
virq = irq_find_mapping(pcie->intx_domain,
- bit + 1);
+ bit + 1);
if (virq)
generic_handle_irq(virq);
else
- dev_err_ratelimited(dev,
- "unexpected IRQ, INT%d\n", bit);
+ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
+ bit);
- /* clear interrupt */
- csr_writel(pcie,
- shifted_status << PAB_INTX_START,
- PAB_INTP_AMBA_MISC_STAT);
+ /* clear the interrupt that was just handled */
+ csr_writel(pcie, 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- } while ((shifted_status >> PAB_INTX_START) != 0);
+
+ shifted_status = csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
+ shifted_status &= PAB_INTP_INTX_MASK;
+ shifted_status >>= PAB_INTX_START;
+ } while (shifted_status != 0);
}
/* read extra MSI status register */
@@ -266,8 +388,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
/* handle MSI interrupts */
while (msi_status & 1) {
- msi_data = readl_relaxed(pcie->apb_csr_base
- + MSI_DATA_OFFSET);
+ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
/*
* MSI_STATUS_OFFSET register gets updated to zero
@@ -276,18 +397,18 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
* two dummy reads.
*/
msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
- MSI_ADDR_L_OFFSET);
+ MSI_ADDR_L_OFFSET);
msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
- MSI_ADDR_H_OFFSET);
+ MSI_ADDR_H_OFFSET);
dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
- msi_data, msi_addr_hi, msi_addr_lo);
+ msi_data, msi_addr_hi, msi_addr_lo);
virq = irq_find_mapping(msi->dev_domain, msi_data);
if (virq)
generic_handle_irq(virq);
msi_status = readl_relaxed(pcie->apb_csr_base +
- MSI_STATUS_OFFSET);
+ MSI_STATUS_OFFSET);
}
/* Clear the interrupt status */
@@ -304,7 +425,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "config_axi_slave");
+ "config_axi_slave");
pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->config_axi_slave_base))
return PTR_ERR(pcie->config_axi_slave_base);
@@ -312,7 +433,7 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
/* map csr resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "csr_axi_slave");
+ "csr_axi_slave");
pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->csr_axi_slave_base))
return PTR_ERR(pcie->csr_axi_slave_base);
@@ -337,92 +458,50 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
return -ENODEV;
}
- irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
-
return 0;
}
-/*
- * select_paged_register - routine to access paged register of root complex
- *
- * registers of RC are paged, for this scheme to work
- * extracted higher 6 bits of the offset will be written to pg_sel
- * field of PAB_CTRL register and rest of the lower 10 bits enabled with
- * PAGE_SEL_EN are used as offset of the register.
- */
-static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
-{
- int pab_ctrl_dw, pg_sel;
-
- /* clear pg_sel field */
- pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
- pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));
-
- /* set pg_sel field */
- pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
- pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
- csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
-}
-
-static void write_paged_register(struct mobiveil_pcie *pcie,
- u32 val, u32 offset)
-{
- u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
-
- select_paged_register(pcie, offset);
- csr_writel(pcie, val, off);
-}
-
-static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
-{
- u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;
-
- select_paged_register(pcie, offset);
- return csr_readl(pcie, off);
-}
-
static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
- int pci_addr, u32 type, u64 size)
+ u64 pci_addr, u32 type, u64 size)
{
- int pio_ctrl_val;
- int amap_ctrl_dw;
+ u32 value;
u64 size64 = ~(size - 1);
- if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
+ if (win_num >= pcie->ppio_wins) {
dev_err(&pcie->pdev->dev,
"ERROR: max inbound windows reached !\n");
return;
}
- pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
- csr_writel(pcie,
- pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
- amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
- amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
- amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));
+ value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+ csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
- PAB_PEX_AMAP_CTRL(win_num));
+ csr_writel(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
- write_paged_register(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
- write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
- write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
- write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ pcie->ib_wins_configured++;
}
/*
* routine to program the outbound windows
*/
static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
- u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
+ u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
{
-
- u32 value, type;
+ u32 value;
u64 size64 = ~(size - 1);
- if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
+ if (win_num >= pcie->apio_wins) {
dev_err(&pcie->pdev->dev,
"ERROR: max outbound windows reached !\n");
return;
@@ -432,28 +511,27 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- type = config_io_bit;
value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
- lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));
+ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+ (lower_32_bits(size64) & WIN_SIZE_MASK);
+ csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- write_paged_register(pcie, upper_32_bits(size64),
- PAB_EXT_AXI_AMAP_SIZE(win_num));
+ csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
-
- value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -469,7 +547,9 @@ static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
}
+
dev_err(&pcie->pdev->dev, "link never came up\n");
+
return -ETIMEDOUT;
}
@@ -482,50 +562,55 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
msi->msi_pages_phys = (phys_addr_t)msg_addr;
writel_relaxed(lower_32_bits(msg_addr),
- pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
writel_relaxed(upper_32_bits(msg_addr),
- pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
- u32 value, pab_ctrl, type = 0;
- int err;
- struct resource_entry *win, *tmp;
-
- err = mobiveil_bringup_link(pcie);
- if (err) {
- dev_info(&pcie->pdev->dev, "link bring-up failed\n");
- return err;
- }
+ u32 value, pab_ctrl, type;
+ struct resource_entry *win;
+
+ /* setup bus numbers */
+ value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
value = csr_readl(pcie, PCI_COMMAND);
- csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER, PCI_COMMAND);
+ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
pab_ctrl = csr_readl(pcie, PAB_CTRL);
- csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
- (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);
+ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
+ csr_writel(pcie, pab_ctrl, PAB_CTRL);
csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
- csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);
+ value |= APIO_EN_MASK;
+ csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+
+ /* Enable PCIe PIO master */
+ value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value |= 1 << PIO_ENABLE_SHIFT;
+ csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -535,32 +620,38 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
*/
/* config outbound translation window */
- program_ob_windows(pcie, pcie->ob_wins_configured,
- pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
- resource_size(pcie->ob_io_res));
+ program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
+ CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
/* memory inbound translation window */
- program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+ program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
- type = 0;
+ resource_list_for_each_entry(win, &pcie->resources) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
- if (resource_type(win->res) == IORESOURCE_IO)
+ else if (resource_type(win->res) == IORESOURCE_IO)
type = IO_WINDOW_TYPE;
- if (type) {
- /* configure outbound translation window */
- program_ob_windows(pcie, pcie->ob_wins_configured,
- win->res->start, 0, type,
- resource_size(win->res));
- }
+ else
+ continue;
+
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+ win->res->start,
+ win->res->start - win->offset,
+ type, resource_size(win->res));
}
+ /* fixup for PCIe class register */
+ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value &= 0xff;
+ value |= (PCI_CLASS_BRIDGE_PCI << 16);
+ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
- return err;
+ return 0;
}
static void mobiveil_mask_intx_irq(struct irq_data *data)
@@ -574,7 +665,8 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
- csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
+ shifted_val &= ~mask;
+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -589,7 +681,8 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
- csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
+ shifted_val |= mask;
+ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -603,10 +696,11 @@ static struct irq_chip intx_irq_chip = {
/* routine to setup the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+ irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
+
return 0;
}
@@ -623,7 +717,7 @@ static struct irq_chip mobiveil_msi_irq_chip = {
static struct msi_domain_info mobiveil_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ MSI_FLAG_PCI_MSIX),
.chip = &mobiveil_msi_irq_chip,
};
@@ -641,7 +735,7 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
+ const struct cpumask *mask, bool force)
{
return -EINVAL;
}
@@ -653,7 +747,8 @@ static struct irq_chip mobiveil_msi_bottom_irq_chip = {
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs, void *args)
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
struct mobiveil_pcie *pcie = domain->host_data;
struct mobiveil_msi *msi = &pcie->msi;
@@ -673,13 +768,13 @@ static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
mutex_unlock(&msi->lock);
irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
- domain->host_data, handle_level_irq,
- NULL, NULL);
+ domain->host_data, handle_level_irq, NULL, NULL);
return 0;
}
static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs)
+ unsigned int virq,
+ unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
@@ -687,12 +782,11 @@ static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
mutex_lock(&msi->lock);
- if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
+ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
d->hwirq);
- } else {
+ else
__clear_bit(d->hwirq, msi->msi_irq_in_use);
- }
mutex_unlock(&msi->lock);
}
@@ -716,12 +810,14 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info, msi->dev_domain);
+ &mobiveil_msi_domain_info,
+ msi->dev_domain);
if (!msi->msi_domain) {
dev_err(dev, "failed to create MSI domain\n");
irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
+
return 0;
}
@@ -732,12 +828,12 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
int ret;
/* setup INTx */
- pcie->intx_domain = irq_domain_add_linear(node,
- PCI_NUM_INTX, &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
+ return -ENOMEM;
}
raw_spin_lock_init(&pcie->intx_mask_lock);
@@ -763,11 +859,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
/* allocate the PCIe port */
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
- return -ENODEV;
+ return -ENOMEM;
pcie = pci_host_bridge_priv(bridge);
- if (!pcie)
- return -ENOMEM;
pcie->pdev = pdev;
@@ -784,7 +878,7 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
&pcie->resources, &iobase);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
- return -ENOMEM;
+ return ret;
}
/*
@@ -797,9 +891,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
goto error;
}
- /* fixup for PCIe class register */
- csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
-
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
@@ -807,6 +898,8 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
goto error;
}
+ irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
ret = devm_request_pci_bus_resources(dev, &pcie->resources);
if (ret)
goto error;
@@ -820,6 +913,12 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
+ ret = mobiveil_bringup_link(pcie);
+ if (ret) {
+ dev_info(dev, "link bring-up failed\n");
+ goto error;
+ }
+
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
@@ -848,10 +947,10 @@ MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
static struct platform_driver mobiveil_pcie_driver = {
.probe = mobiveil_pcie_probe,
.driver = {
- .name = "mobiveil-pcie",
- .of_match_table = mobiveil_pcie_of_match,
- .suppress_bind_attrs = true,
- },
+ .name = "mobiveil-pcie",
+ .of_match_table = mobiveil_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
};
builtin_platform_driver(mobiveil_pcie_driver);
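A standalone sketch of the paged-access arithmetic that mobiveil_pcie_comp_addr() relies on: offsets below the 0xc00 boundary are accessed directly, while higher offsets split into a 6-bit page index (written to the pg_sel field of PAB_CTRL) and a 10-bit offset within the paged window:

#include <stdio.h>

#define PAGED_ADDR_BNDRY	0xc00
#define PAGE_LO_MASK		0x3ff
#define PAGE_SEL_MASK		0x3f
#define PAGE_SEL_OFFSET_SHIFT	10

int main(void)
{
	unsigned int off = 0x4ba0;	/* e.g. PAB_PEX_AMAP_CTRL(0) */
	unsigned int idx = (off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
	unsigned int lo  = (off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY;

	/* prints: off=0x4ba0 page=0x12 paged_off=0xfa0 */
	printf("off=0x%x page=0x%x paged_off=0x%x\n", off, idx, lo);
	return 0;
}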
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 3b031f00a94a..45c0f344ccd1 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -482,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int i;
mutex_lock(&msi->lock);
- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
- nr_irqs, 0);
- if (bit >= INT_PCI_MSI_NR) {
+ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
mutex_unlock(&msi->lock);
return -ENOSPC;
}
- bitmap_set(msi->bitmap, bit, nr_irqs);
-
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
domain->host_data, handle_simple_irq,
@@ -508,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct nwl_msi *msi = &pcie->msi;
mutex_lock(&msi->lock);
- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
mutex_unlock(&msi->lock);
}
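The switch to bitmap_find_free_region()/bitmap_release_region() above reserves a naturally aligned power-of-two block of vectors, which multi-MSI requires. A standalone sketch of the rounding involved; count_order() mimics the kernel's get_count_order():

#include <stdio.h>

/* Mimics the kernel's get_count_order(): log2 of n, rounded up. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* e.g. 3 requested vectors still consume an aligned block of 4 */
	for (unsigned int n = 1; n <= 8; n++)
		printf("nr_irqs=%u -> order=%d (block of %u)\n",
		       n, count_order(n), 1u << count_order(n));
	return 0;
}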
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 999a5509e57e..4575e0c6dc4b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -627,7 +627,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
* 32-bit resources. __pci_assign_resource() enforces that
* artificial restriction to make sure everything will fit.
*
- * The only way we could use a 64-bit non-prefechable MEMBAR is
+ * The only way we could use a 64-bit non-prefetchable MEMBAR is
* if its address is <4GB so that we can convert it to a 32-bit
* resource. To be visible to the host OS, all VMD endpoints must
* be initially configured by platform BIOS, which includes setting
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 27806987e93b..1cfe3687a211 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -381,15 +381,15 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
- pci_epf_free_space(epf, epf_test->reg[bar], bar);
pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
}
}
}
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar;
+ int bar, add;
int ret;
struct pci_epf_bar *epf_bar;
struct pci_epc *epc = epf->epc;
@@ -400,8 +400,14 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = BAR_0; bar <= BAR_5; bar += add) {
epf_bar = &epf->bar[bar];
+ /*
+ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
+ * if the specific implementation required a 64-bit BAR,
+ * even if we only requested a 32-bit BAR.
+ */
+ add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
if (!!(epc_features->reserved_bar & (1 << bar)))
continue;
@@ -413,13 +419,6 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (bar == test_reg_bar)
return ret;
}
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar++;
}
return 0;
@@ -431,13 +430,19 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
struct device *dev = &epf->dev;
struct pci_epf_bar *epf_bar;
void *base;
- int bar;
+ int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
const struct pci_epc_features *epc_features;
+ size_t test_reg_size;
epc_features = epf_test->epc_features;
- base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+ if (epc_features->bar_fixed_size[test_reg_bar])
+ test_reg_size = bar_size[test_reg_bar];
+ else
+ test_reg_size = sizeof(struct pci_epf_test_reg);
+
+ base = pci_epf_alloc_space(epf, test_reg_size,
test_reg_bar, epc_features->align);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
@@ -445,8 +450,10 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = BAR_0; bar <= BAR_5; bar += add) {
epf_bar = &epf->bar[bar];
+ add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+
if (bar == test_reg_bar)
continue;
@@ -459,8 +466,6 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
epf_test->reg[bar] = base;
- if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar++;
}
return 0;
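The new "bar += add" stride in the loops above reflects that a 64-bit memory BAR occupies two BAR slots. A standalone sketch with a hypothetical flags layout shows which slots the loop visits:

#include <stdio.h>

#define MEM_TYPE_64	0x4	/* mirrors PCI_BASE_ADDRESS_MEM_TYPE_64 */

int main(void)
{
	/* Hypothetical layout: BAR0 and BAR4 are 64-bit, so BAR1 and BAR5
	 * hold their upper halves and must not be programmed separately. */
	unsigned int flags[6] = { MEM_TYPE_64, 0, 0, 0, MEM_TYPE_64, 0 };
	int bar, add;

	for (bar = 0; bar <= 5; bar += add) {
		add = (flags[bar] & MEM_TYPE_64) ? 2 : 1;
		printf("configure BAR%d (%s)\n", bar,
		       add == 2 ? "64-bit, next slot skipped" : "32-bit");
	}
	return 0;
}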
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index e4712a0f249c..2091508c1620 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -519,11 +519,12 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
{
unsigned long flags;
- if (!epc || IS_ERR(epc))
+ if (!epc || IS_ERR(epc) || !epf)
return;
spin_lock_irqsave(&epc->lock, flags);
list_del(&epf->list);
+ epf->epc = NULL;
spin_unlock_irqrestore(&epc->lock, flags);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 3aa115ed3a65..525fd3f272b3 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -132,8 +132,6 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn)
&physfn->sriov->subsystem_vendor);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
&physfn->sriov->subsystem_device);
-
- physfn->sriov->cfg_size = pci_cfg_space_size(virtfn);
}
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 24505b08de40..b8c9011987f4 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -73,7 +73,7 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
/*
- * Legacy setup: Impement pci_mmap_resource_range() as a wrapper around
+ * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
* the architecture's pci_mmap_page_range(), converting to "user visible"
* addresses as necessary.
*/
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index e039b740fe74..59a6d232f77a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -237,7 +237,7 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
}
/**
- * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
+ * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
* @data: pointer to irqdata associated to that interrupt
*/
void pci_msi_mask_irq(struct irq_data *data)
@@ -247,7 +247,7 @@ void pci_msi_mask_irq(struct irq_data *data)
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
/**
- * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
+ * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
* @data: pointer to irqdata associated to that interrupt
*/
void pci_msi_unmask_irq(struct irq_data *data)
@@ -588,11 +588,11 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
- * @affd: description of automatic irq affinity assignments (may be %NULL)
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI irq. A negative return value indicates
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
@@ -609,7 +609,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
if (!entry)
return -ENOMEM;
- /* All MSIs are unmasked by default, Mask them all */
+ /* All MSIs are unmasked by default; mask them all */
mask = msi_mask(entry->msi_attrib.multi_cap);
msi_mask_irq(entry, mask, mask);
@@ -637,7 +637,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
return ret;
}
- /* Set MSI enabled bits */
+ /* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
dev->msi_enabled = 1;
@@ -729,11 +729,11 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
- * @affd: Optional pointer to enable automatic affinity assignement
+ * @affd: Optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of device function with a
- * single MSI-X irq. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated irqs or non-zero for otherwise.
+ * single MSI-X IRQ. A return of zero indicates the successful setup of
+ * requested MSI-X entries with allocated IRQs, or non-zero otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
@@ -789,7 +789,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
out_avail:
if (ret < 0) {
/*
- * If we had some success, report the number of irqs
+ * If we had some success, report the number of IRQs
* we succeeded in setting up.
*/
struct msi_desc *entry;
@@ -812,7 +812,7 @@ out_free:
/**
* pci_msi_supported - check whether MSI may be enabled on a device
* @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: how many MSIs have been requested ?
+ * @nvec: how many MSIs have been requested?
*
* Look at global flags, the device itself, and its parent buses
* to determine if MSI/-X are supported for the device. If MSI/-X is
@@ -896,7 +896,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
/* Keep cached state to be restored */
__pci_msi_desc_mask_irq(desc, mask, ~mask);
- /* Restore dev->irq to its default pin-assertion irq */
+ /* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->msi_attrib.default_irq;
pcibios_alloc_irq(dev);
}
@@ -958,7 +958,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
}
}
- /* Check whether driver already requested for MSI irq */
+ /* Check whether driver already requested for MSI IRQ */
if (dev->msi_enabled) {
pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
@@ -1026,7 +1026,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (!pci_msi_supported(dev, minvec))
return -EINVAL;
- /* Check whether driver already requested MSI-X irqs */
+ /* Check whether driver already requested MSI-X IRQs */
if (dev->msix_enabled) {
pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
return -EINVAL;
@@ -1113,8 +1113,8 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
* pci_enable_msix_range - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of MSI-X entries
- * @minvec: minimum number of MSI-X irqs requested
- * @maxvec: maximum number of MSI-X irqs requested
+ * @minvec: minimum number of MSI-X IRQs requested
+ * @maxvec: maximum number of MSI-X IRQs requested
*
* Setup the MSI-X capability structure of device function with a maximum
* possible number of interrupts in the range between @minvec and @maxvec
@@ -1179,7 +1179,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return msi_vecs;
}
- /* use legacy irq if allowed */
+ /* use legacy IRQ if allowed */
if (flags & PCI_IRQ_LEGACY) {
if (min_vecs == 1 && dev->irq) {
/*
@@ -1248,7 +1248,7 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
EXPORT_SYMBOL(pci_irq_vector);
/**
- * pci_irq_get_affinity - return the affinity of a particular msi vector
+ * pci_irq_get_affinity - return the affinity of a particular MSI vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
*/
@@ -1280,7 +1280,7 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
- * pci_irq_get_node - return the numa node of a particular msi vector
+ * pci_irq_get_node - return the NUMA node of a particular MSI vector
* @pdev: PCI device to operate on
* @vec: device-relative interrupt vector index (0-based).
*/
@@ -1330,7 +1330,7 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
/**
* pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
* @dev: Pointer to the PCI device
- * @desc: Pointer to the msi descriptor
+ * @desc: Pointer to the MSI descriptor
*
* The ID number is only used within the irqdomain.
*/
@@ -1348,7 +1348,8 @@ static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
}
/**
- * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
+ * for @dev
* @domain: The interrupt domain to check
* @info: The domain info for verification
* @dev: The device to check
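For context, a hedged sketch of how a driver typically consumes the vector-allocation interfaces whose kerneldoc is cleaned up above; example_handler and the "example" name are hypothetical, and error unwinding is trimmed:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* a real driver would ack the device here */
}

static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvecs, i, err;

	/* Prefer MSI-X, fall back to MSI, then to the legacy INTx IRQ */
	nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_LEGACY);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		err = request_irq(pci_irq_vector(pdev, i), example_handler,
				  0, "example", pdev);
		if (err) {
			pci_free_irq_vectors(pdev);	/* trimmed unwind */
			return err;
		}
	}

	return 0;
}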
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 73d5adec0a28..bc7b27a28795 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -22,12 +22,15 @@ void pci_set_of_node(struct pci_dev *dev)
return;
dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
dev->devfn);
+ if (dev->dev.of_node)
+ dev->dev.fwnode = &dev->dev.of_node->fwnode;
}
void pci_release_of_node(struct pci_dev *dev)
{
of_node_put(dev->dev.of_node);
dev->dev.of_node = NULL;
+ dev->dev.fwnode = NULL;
}
void pci_set_bus_of_node(struct pci_bus *bus)
@@ -41,13 +44,18 @@ void pci_set_bus_of_node(struct pci_bus *bus)
if (node && of_property_read_bool(node, "external-facing"))
bus->self->untrusted = true;
}
+
bus->dev.of_node = node;
+
+ if (bus->dev.of_node)
+ bus->dev.fwnode = &bus->dev.of_node->fwnode;
}
void pci_release_bus_of_node(struct pci_bus *bus)
{
of_node_put(bus->dev.of_node);
bus->dev.of_node = NULL;
+ bus->dev.fwnode = NULL;
}
struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index a98126ad9c3a..234476226529 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -18,18 +18,13 @@
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
+#include <linux/iommu.h>
struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
};
-struct p2pdma_pagemap {
- struct dev_pagemap pgmap;
- struct percpu_ref ref;
- struct completion ref_done;
-};
-
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -78,31 +73,6 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};
-static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
-{
- return container_of(ref, struct p2pdma_pagemap, ref);
-}
-
-static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
-{
- struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
-
- complete(&p2p_pgmap->ref_done);
-}
-
-static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
-{
- percpu_ref_kill(ref);
-}
-
-static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
-{
- struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
-
- wait_for_completion(&p2p_pgmap->ref_done);
- percpu_ref_exit(&p2p_pgmap->ref);
-}
-
static void pci_p2pdma_release(void *data)
{
struct pci_dev *pdev = data;
@@ -165,7 +135,6 @@ out:
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset)
{
- struct p2pdma_pagemap *p2p_pgmap;
struct dev_pagemap *pgmap;
void *addr;
int error;
@@ -188,27 +157,15 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
return error;
}
- p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
- if (!p2p_pgmap)
+ pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
return -ENOMEM;
-
- init_completion(&p2p_pgmap->ref_done);
- error = percpu_ref_init(&p2p_pgmap->ref,
- pci_p2pdma_percpu_release, 0, GFP_KERNEL);
- if (error)
- goto pgmap_free;
-
- pgmap = &p2p_pgmap->pgmap;
-
pgmap->res.start = pci_resource_start(pdev, bar) + offset;
pgmap->res.end = pgmap->res.start + size - 1;
pgmap->res.flags = pci_resource_flags(pdev, bar);
- pgmap->ref = &p2p_pgmap->ref;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
pci_resource_start(pdev, bar);
- pgmap->kill = pci_p2pdma_percpu_kill;
- pgmap->cleanup = pci_p2pdma_percpu_cleanup;
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@@ -219,7 +176,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
resource_size(&pgmap->res), dev_to_node(&pdev->dev),
- &p2p_pgmap->ref);
+ pgmap->ref);
if (error)
goto pages_free;
@@ -231,14 +188,14 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pages_free:
devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
- devm_kfree(&pdev->dev, p2p_pgmap);
+ devm_kfree(&pdev->dev, pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
/*
* Note this function returns the parent PCI device with a
- * reference taken. It is the caller's responsibily to drop
+ * reference taken. It is the caller's responsibility to drop
* the reference.
*/
static struct pci_dev *find_parent_pci_dev(struct device *dev)
@@ -299,6 +256,9 @@ static bool root_complex_whitelist(struct pci_dev *dev)
struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
unsigned short vendor, device;
+ if (iommu_present(dev->dev.bus))
+ return false;
+
if (!root)
return false;
@@ -395,7 +355,7 @@ static int upstream_bridge_distance(struct pci_dev *provider,
/*
* Allow the connection if both devices are on a whitelisted root
- * complex, but add an arbitary large value to the distance.
+ * complex, but add an arbitrary large value to the distance.
*/
if (root_complex_whitelist(provider) &&
root_complex_whitelist(client))
@@ -454,7 +414,7 @@ static int upstream_bridge_distance_warn(struct pci_dev *provider,
}
/**
- * pci_p2pdma_distance_many - Determive the cumulative distance between
+ * pci_p2pdma_distance_many - Determine the cumulative distance between
* a p2pdma provider and the clients in use.
* @provider: p2pdma provider to check against the client list
* @clients: array of devices to check (NULL-terminated)
@@ -483,6 +443,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
return -1;
for (i = 0; i < num_clients; i++) {
+ if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
+ clients[i]->dma_ops == &dma_virt_ops) {
+ if (verbose)
+ dev_warn(clients[i],
+ "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
+ return -1;
+ }
+
pci_client = find_parent_pci_dev(clients[i]);
if (!pci_client) {
if (verbose)
@@ -761,7 +729,7 @@ int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* p2pdma mappings are not compatible with devices that use
* dma_virt_ops. If the upper layers do the right thing
* this should never happen because it will be prevented
- * by the check in pci_p2pdma_add_client()
+ * by the check in pci_p2pdma_distance_many()
*/
if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
dev->dma_ops == &dma_virt_ops))
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 1897847ceb0c..45049f558860 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -685,12 +685,21 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
if (!adev || !acpi_device_power_manageable(adev))
return PCI_UNKNOWN;
- if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
+ state = adev->power.state;
+ if (state == ACPI_STATE_UNKNOWN)
return PCI_UNKNOWN;
return state_conv[state];
}
+static void acpi_pci_refresh_power_state(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (adev && acpi_device_power_manageable(adev))
+ acpi_device_update_power(adev, NULL);
+}
+
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
@@ -748,6 +757,7 @@ static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.get_state = acpi_pci_get_power_state,
+ .refresh_state = acpi_pci_refresh_power_state,
.choose_state = acpi_pci_choose_state,
.set_wakeup = acpi_pci_wakeup,
.need_resume = acpi_pci_need_resume,
@@ -901,6 +911,7 @@ static void pci_acpi_setup(struct device *dev)
device_wakeup_enable(dev);
acpi_pci_wakeup(pci_dev, false);
+ acpi_device_power_add_dependent(adev, dev);
}
static void pci_acpi_cleanup(struct device *dev)
@@ -913,6 +924,7 @@ static void pci_acpi_cleanup(struct device *dev)
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
+ acpi_device_power_remove_dependent(adev, dev);
if (pci_dev->bridge_d3)
device_wakeup_disable(dev);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 83fb077d0b41..06083b86d4f4 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -305,7 +305,7 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
}
/*
- * Cleanup a pci_bridge_emul structure that was previously initilized
+ * Cleanup a pci_bridge_emul structure that was previously initialized
* using pci_bridge_emul_init().
*/
void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 5eadbc3d0969..a8124e47bf6e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
- return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
+ pdev->driver_override);
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -414,6 +415,9 @@ static int pci_device_probe(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = to_pci_driver(dev->driver);
+ if (!pci_device_can_probe(pci_dev))
+ return -ENODEV;
+
pci_assign_irq(pci_dev);
error = pcibios_alloc_irq(pci_dev);
@@ -421,12 +425,10 @@ static int pci_device_probe(struct device *dev)
return error;
pci_dev_get(pci_dev);
- if (pci_device_can_probe(pci_dev)) {
- error = __pci_device_probe(drv, pci_dev);
- if (error) {
- pcibios_free_irq(pci_dev);
- pci_dev_put(pci_dev);
- }
+ error = __pci_device_probe(drv, pci_dev);
+ if (error) {
+ pcibios_free_irq(pci_dev);
+ pci_dev_put(pci_dev);
}
return error;
@@ -524,7 +526,6 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
pci_power_up(pci_dev);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
- pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
/*
@@ -679,6 +680,7 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
@@ -688,7 +690,15 @@ static int pci_pm_prepare(struct device *dev)
if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
return 0;
}
- return pci_dev_keep_suspended(to_pci_dev(dev));
+ if (pci_dev_need_resume(pci_dev))
+ return 0;
+
+ /*
+ * The PME setting needs to be adjusted here in case the direct-complete
+ * optimization is used with respect to this device.
+ */
+ pci_dev_adjust_pme(pci_dev);
+ return 1;
}
static void pci_pm_complete(struct device *dev)
@@ -702,7 +712,14 @@ static void pci_pm_complete(struct device *dev)
if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
- pci_update_current_state(pci_dev, pci_dev->current_state);
+ pci_refresh_power_state(pci_dev);
+ /*
+ * On platforms with ACPI this check may also trigger for
+ * devices sharing power resources if one of those power
+ * resources has been activated as a result of a change of the
+ * power state of another device sharing it. However, in that
+ * case it is also better to resume the device, in general.
+ */
if (pci_dev->current_state < pre_sleep_state)
pm_request_resume(dev);
}
@@ -758,9 +775,11 @@ static int pci_pm_suspend(struct device *dev)
* better to resume the device from runtime suspend here.
*/
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- !pci_dev_keep_suspended(pci_dev)) {
+ pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
+ } else {
+ pci_dev_adjust_pme(pci_dev);
}
if (pm->suspend) {
@@ -831,18 +850,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (pci_dev->skip_bus_pm) {
/*
- * The function is running for the second time in a row without
+ * Either the device is a bridge with a child in D0 below it, or
+ * the function is running for the second time in a row without
* going through full resume, which is possible only during
- * suspend-to-idle in a spurious wakeup case. Moreover, the
- * device was originally left in D0, so its power state should
- * not be changed here and the device register values saved
- * originally should be restored on resume again.
+ * suspend-to-idle in a spurious wakeup case. The device should
+ * be in D0 at this point, but if it is a bridge, it may be
+ * necessary to save its state.
*/
- pci_dev->state_saved = true;
- } else if (pci_dev->state_saved) {
- if (pci_dev->current_state == PCI_D0)
- pci_dev->skip_bus_pm = true;
- } else {
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
+ } else if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
if (pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
@@ -851,6 +868,22 @@ static int pci_pm_suspend_noirq(struct device *dev)
dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
pci_power_name(pci_dev->current_state));
+ if (pci_dev->current_state == PCI_D0) {
+ pci_dev->skip_bus_pm = true;
+ /*
+ * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any
+ * downstream device is in D0, so avoid changing the power state
+ * of the parent bridge by setting the skip_bus_pm flag for it.
+ */
+ if (pci_dev->bus->self)
+ pci_dev->bus->self->skip_bus_pm = true;
+ }
+
+ if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
+ dev_dbg(dev, "PCI PM: Skipped\n");
+ goto Fixup;
+ }
+
pci_pm_set_unknown_state(pci_dev);
/*
@@ -898,7 +931,16 @@ static int pci_pm_resume_noirq(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
pm_runtime_set_active(dev);
- pci_pm_default_resume_early(pci_dev);
+ /*
+ * In the suspend-to-idle case, devices left in D0 during suspend will
+	 * stay in D0, so there is no need to restore or update their
+	 * configuration here, and attempting to put them into D0 again
+	 * would be pointless, so avoid doing that.
+ */
+ if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+ pci_pm_default_resume_early(pci_dev);
+
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -972,15 +1014,15 @@ static int pci_pm_freeze(struct device *dev)
}
/*
- * This used to be done in pci_pm_prepare() for all devices and some
- * drivers may depend on it, so do it here. Ideally, runtime-suspended
- * devices should not be touched during freeze/thaw transitions,
- * however.
+ * Resume all runtime-suspended devices before creating a snapshot
+ * image of system memory, because the restore kernel generally cannot
+ * be expected to always handle them consistently and they need to be
+ * put into the runtime-active metastate during system resume anyway,
+	 * so it is better to ensure that the state saved in the image will
+	 * always be consistent with that.
*/
- if (!dev_pm_smart_suspend_and_suspended(dev)) {
- pm_runtime_resume(dev);
- pci_dev->state_saved = false;
- }
+ pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
if (pm->freeze) {
int error;
@@ -994,22 +1036,11 @@ static int pci_pm_freeze(struct device *dev)
return 0;
}
-static int pci_pm_freeze_late(struct device *dev)
-{
- if (dev_pm_smart_suspend_and_suspended(dev))
- return 0;
-
- return pm_generic_freeze_late(dev);
-}
-
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
- if (dev_pm_smart_suspend_and_suspended(dev))
- return 0;
-
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
@@ -1039,16 +1070,6 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
- /*
- * If the device is in runtime suspend, the code below may not work
- * correctly with it, so skip that code and make the PM core skip all of
- * the subsequent "thaw" callbacks for the device.
- */
- if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev_pm_skip_next_resume_phases(dev);
- return 0;
- }
-
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
if (error)
@@ -1108,10 +1129,13 @@ static int pci_pm_poweroff(struct device *dev)
/* The reason to do that is the same as in pci_pm_suspend(). */
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- !pci_dev_keep_suspended(pci_dev))
+ pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
+ pci_dev->state_saved = false;
+ } else {
+ pci_dev_adjust_pme(pci_dev);
+ }
- pci_dev->state_saved = false;
if (pm->poweroff) {
int error;
@@ -1183,10 +1207,6 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
- /* This is analogous to the pci_pm_resume_noirq() case. */
- if (dev_pm_smart_suspend_and_suspended(dev))
- pm_runtime_set_active(dev);
-
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
if (error)
@@ -1194,6 +1214,7 @@ static int pci_pm_restore_noirq(struct device *dev)
}
pci_pm_default_resume_early(pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -1235,7 +1256,6 @@ static int pci_pm_restore(struct device *dev)
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
-#define pci_pm_freeze_late NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
@@ -1361,7 +1381,6 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
.suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
- .freeze_late = pci_pm_freeze_late,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
.poweroff_late = pci_pm_poweroff_late,
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index 9795649fc6f9..ef293e735c55 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* pci-pf-stub - simple stub driver for PCI SR-IOV PF device
*
- * This driver is meant to act as a "whitelist" for devices that provde
+ * This driver is meant to act as a "whitelist" for devices that provide
* SR-IOV functionality while at the same time not actually needing a
* driver of their own.
*/
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 6d27475e39b2..965c72104150 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -182,6 +182,9 @@ static ssize_t current_link_speed_show(struct device *dev,
return -EINVAL;
switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_32_0GB:
+ speed = "32 GT/s";
+ break;
case PCI_EXP_LNKSTA_CLS_16_0GB:
speed = "16 GT/s";
break;
@@ -477,7 +480,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
return count;
}
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
(S_IWUSR|S_IWGRP),
NULL, remove_store);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8abc843b1615..29ed5ec1ac27 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -777,6 +777,12 @@ static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}
+static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
+{
+ if (pci_platform_pm && pci_platform_pm->refresh_state)
+ pci_platform_pm->refresh_state(dev);
+}
+
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
@@ -938,6 +944,21 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_refresh_power_state - Refresh the given device's power state data
+ * @dev: Target PCI device.
+ *
+ * Ask the platform to refresh the device's power state information and invoke
+ * pci_update_current_state() to update its current PCI power state.
+ */
+void pci_refresh_power_state(struct pci_dev *dev)
+{
+ if (platform_pci_power_manageable(dev))
+ platform_pci_refresh_power_state(dev);
+
+ pci_update_current_state(dev, dev->current_state);
+}
+
+/**
* pci_power_up - Put the given device into D0 forcibly
* @dev: PCI device to power up
*/
@@ -1004,15 +1025,10 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+ * Mandatory power management transition delays are
+ * handled in the PCIe portdrv resume hooks.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -2065,6 +2081,13 @@ static void pci_pme_list_scan(struct work_struct *work)
*/
if (bridge && bridge->current_state != PCI_D0)
continue;
+ /*
+ * If the device is in D3cold it should not be
+ * polled either.
+ */
+ if (pme_dev->dev->current_state == PCI_D3cold)
+ continue;
+
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
@@ -2459,45 +2482,56 @@ bool pci_dev_run_wake(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
- * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
+ * pci_dev_need_resume - Check if it is necessary to resume the device.
* @pci_dev: Device to check.
*
- * Return 'true' if the device is runtime-suspended, it doesn't have to be
+ * Return 'true' if the device is not runtime-suspended or it has to be
* reconfigured due to wakeup settings difference between system and runtime
- * suspend and the current power state of it is suitable for the upcoming
- * (system) transition.
- *
- * If the device is not configured for system wakeup, disable PME for it before
- * returning 'true' to prevent it from waking up the system unnecessarily.
+ * suspend, or its current power state is not suitable for the upcoming
+ * (system-wide) transition.
*/
-bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
+bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
- bool wakeup = device_may_wakeup(dev);
+ pci_power_t target_state;
- if (!pm_runtime_suspended(dev)
- || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev))
- return false;
+ if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
+ return true;
+
+ target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
/*
- * At this point the device is good to go unless it's been configured
- * to generate PME at the runtime suspend time, but it is not supposed
- * to wake up the system. In that case, simply disable PME for it
- * (it will have to be re-enabled on exit from system resume).
- *
- * If the device's power state is D3cold and the platform check above
- * hasn't triggered, the device's configuration is suitable and we don't
- * need to manipulate it at all.
+ * If the earlier platform check has not triggered, D3cold is just power
+ * removal on top of D3hot, so no need to resume the device in that
+ * case.
*/
+ return target_state != pci_dev->current_state &&
+ target_state != PCI_D3cold &&
+ pci_dev->current_state != PCI_D3hot;
+}
+
+/**
+ * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
+ * @pci_dev: Device to check.
+ *
+ * If the device is suspended and it is not configured for system wakeup,
+ * disable PME for it to prevent it from waking up the system unnecessarily.
+ *
+ * Note that if the device's power state is D3cold and the platform check in
+ * pci_dev_need_resume() has not triggered, the device's configuration need not
+ * be changed.
+ */
+void pci_dev_adjust_pme(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
spin_lock_irq(&dev->power.lock);
- if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
- !wakeup)
+ if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
+ pci_dev->current_state < PCI_D3cold)
__pci_pme_active(pci_dev, false);
spin_unlock_irq(&dev->power.lock);
- return true;
}
/**
@@ -4501,7 +4535,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
/*
* Wait for Transaction Pending bit to clear. A word-aligned test
- * is used, so we use the conrol offset rather than status and shift
+ * is used, so we use the control offset rather than status and shift
* the test bit to match.
*/
if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
@@ -4568,14 +4602,16 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
 * Use this to wait until the link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
{
int timeout = 1000;
bool ret;
@@ -4612,13 +4648,25 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -5621,7 +5669,9 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
if (lnkcap2) { /* PCIe r3.0-compliant */
- if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
+ return PCIE_SPEED_32_0GT;
+ else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
return PCIE_SPEED_16_0GT;
else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
return PCIE_SPEED_8_0GT;
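To make the three-clause return at the end of pci_dev_need_resume() easier to audit, here is a hedged case-by-case reading of that expression (a decomposition only, not additional code in the series):

/*
 * Assuming the runtime-suspend and platform checks did not already
 * return true:
 *
 *   target_state == current_state -> false (state already suitable);
 *   target_state == PCI_D3cold    -> false (power removal on top of
 *                                    D3hot, nothing to reprogram);
 *   current_state == PCI_D3hot    -> false (deepest reprogrammable
 *                                    state already reached);
 *   all three inequalities hold   -> true  (resume and reconfigure).
 */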
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9cb99380c61e..1be03a97cb92 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -51,6 +51,8 @@ int pci_bus_error_reset(struct pci_dev *dev);
*
* @get_state: queries the platform firmware for a device's current power state
*
+ * @refresh_state: asks the platform to refresh the device's power state data
+ *
* @choose_state: returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
@@ -69,6 +71,7 @@ struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*get_state)(struct pci_dev *dev);
+ void (*refresh_state)(struct pci_dev *dev);
pci_power_t (*choose_state)(struct pci_dev *dev);
int (*set_wakeup)(struct pci_dev *dev, bool enable);
bool (*need_resume)(struct pci_dev *dev);
@@ -76,13 +79,15 @@ struct pci_platform_pm_ops {
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_refresh_power_state(struct pci_dev *dev);
void pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
-bool pci_dev_keep_suspended(struct pci_dev *dev);
+bool pci_dev_need_resume(struct pci_dev *dev);
+void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
@@ -293,7 +298,6 @@ struct pci_sriov {
u16 driver_max_VFs; /* Max num VFs driver supports */
struct pci_dev *dev; /* Lowest numbered PF */
struct pci_dev *self; /* This PF */
- u32 cfg_size; /* VF config space size */
u32 class; /* VF device */
u8 hdr_type; /* VF header type */
u16 subsystem_vendor; /* VF subsystem vendor */
@@ -493,6 +497,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
u32 service);
+bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
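For reference, a platform backend fills in the new ->refresh_state() hook next to the other pci_platform_pm_ops callbacks; a hedged ACPI-flavoured sketch (the function name is invented here; an actual implementation would plausibly live in drivers/pci/pci-acpi.c):

static void example_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	/* Ask ACPI to re-evaluate the device's current power state. */
	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}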
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 043b8b0cfcc5..6988fe7389b9 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -2,7 +2,7 @@
/*
* PCIe AER software error injection support.
*
- * Debuging PCIe AER code is quite difficult because it is hard to
+ * Debugging PCIe AER code is quite difficult because it is hard to
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index fd4cb75088f9..e44af7f4d37f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1062,18 +1062,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
if (!pci_is_pcie(pdev))
- return;
+ return 0;
if (pdev->has_secondary_link)
parent = pdev;
if (!parent || !parent->link_state)
- return;
+ return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
@@ -1085,7 +1085,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
*/
if (aspm_disabled) {
pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
- return;
+ return -EPERM;
}
if (sem)
@@ -1105,11 +1105,13 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
+
+ return 0;
}
-void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false);
+ return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -1117,14 +1119,14 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
* pci_disable_link_state - Disable device's link state, so the link will
* never enter specific states. Note that if the BIOS didn't grant ASPM
* control to the OS, this does nothing because we can't touch the LNKCTL
- * register.
+ * register. Returns 0 or a negative errno.
*
* @pdev: PCI device
* @state: ASPM link state to disable
*/
-void pci_disable_link_state(struct pci_dev *pdev, int state)
+int pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true);
+ return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
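With the helpers now returning an error code, callers can confirm that ASPM was actually disabled before depending on it; a hypothetical driver-side sketch (function name and message invented for illustration):

static int example_disable_l1(struct pci_dev *pdev)
{
	int ret = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);

	/*
	 * A non-zero result means the OS does not own ASPM control or
	 * there is no link state to operate on, so the caller must not
	 * assume L1 is off.
	 */
	if (ret)
		pci_warn(pdev, "cannot disable ASPM L1: %d\n", ret);
	return ret;
}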
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..308c3e0c4a34 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -378,6 +379,67 @@ static int pm_iter(struct device *dev, void *data)
return 0;
}
+static int get_downstream_delay(struct pci_bus *bus)
+{
+ struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (!pdev->imm_ready)
+ min_delay = 0;
+ else if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
+
+/*
+ * wait_for_downstream_link - Wait for downstream link to establish
+ * @pdev: PCIe port whose downstream link is waited for
+ *
+ * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
+ * access to the downstream component is permitted.
+ *
+ * This blocks PCI core resume of the hierarchy below this port until the
+ * link is trained. This should be called before resuming port services
+ * to prevent pciehp from starting to tear down the hierarchy too soon.
+ */
+static void wait_for_downstream_link(struct pci_dev *pdev)
+{
+ int delay;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ if (pci_dev_is_disconnected(pdev))
+ return;
+
+ if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
+ !pdev->bridge_d3)
+ return;
+
+ delay = get_downstream_delay(pdev->subordinate);
+ if (!delay)
+ return;
+
+	dev_dbg(&pdev->dev, "waiting %d ms for downstream link\n", delay);
+
+ /*
+	 * If the downstream port does not support speeds greater than
+	 * 5 GT/s, we only need to wait out the computed delay. For higher
+	 * speeds (gen3 and above) we must first wait for the data link
+	 * layer to become active.
+ */
+ if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
+ msleep(delay);
+ else
+ pcie_wait_for_link_delay(pdev, true, delay);
+}
+
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@@ -391,6 +453,8 @@ int pcie_port_device_suspend(struct device *dev)
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
@@ -421,6 +485,8 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+
+ wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
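A hedged set of worked cases for get_downstream_delay() (device mixes hypothetical):

/*
 * - every child sets imm_ready with d3cold_delay == 0:
 *     min_delay = 0, max_delay = 0    -> 0, the wait is skipped;
 * - children with imm_ready and d3cold_delay of 10 and 120 ms:
 *     min_delay = 10, max_delay = 120 -> 120 ms;
 * - one child without imm_ready (d3cold_delay defaults to 100):
 *     min_delay = 0, max_delay = 100  -> 100 ms.
 *
 * Taking max(min_delay, max_delay) keeps the most conservative bound
 * any child requires, while still collapsing to zero when every child
 * advertises immediate readiness.
 */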
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0e8e2c186f50..a3c7338fad86 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -64,7 +64,7 @@ static struct resource *get_pci_domain_busn_res(int domain_nr)
return &r->res;
}
-static int find_anything(struct device *dev, void *data)
+static int find_anything(struct device *dev, const void *data)
{
return 1;
}
@@ -668,7 +668,7 @@ const unsigned char pcie_link_speed[] = {
PCIE_SPEED_5_0GT, /* 2 */
PCIE_SPEED_8_0GT, /* 3 */
PCIE_SPEED_16_0GT, /* 4 */
- PCI_SPEED_UNKNOWN, /* 5 */
+ PCIE_SPEED_32_0GT, /* 5 */
PCI_SPEED_UNKNOWN, /* 6 */
PCI_SPEED_UNKNOWN, /* 7 */
PCI_SPEED_UNKNOWN, /* 8 */
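The table above is conventionally indexed with the Current Link Speed field of the Link Status register; a minimal lookup sketch (helper name invented):

static enum pci_bus_speed example_current_speed(struct pci_dev *dev)
{
	u16 lnksta;

	/* CLS encodings 1..5 now map up to PCIE_SPEED_32_0GT. */
	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
	return pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
}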
@@ -1555,17 +1555,6 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
return PCI_CFG_SPACE_EXP_SIZE;
}
-#ifdef CONFIG_PCI_IOV
-static bool is_vf0(struct pci_dev *dev)
-{
- if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn &&
- pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number)
- return true;
-
- return false;
-}
-#endif
-
int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
@@ -1573,9 +1562,18 @@ int pci_cfg_space_size(struct pci_dev *dev)
u16 class;
#ifdef CONFIG_PCI_IOV
- /* Read cached value for all VFs except for VF0 */
- if (dev->is_virtfn && !is_vf0(dev))
- return dev->physfn->sriov->cfg_size;
+ /*
+ * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to
+ * implement a PCIe capability and therefore must implement extended
+ * config space. We can skip the NO_EXTCFG test below and the
+ * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of
+ * the fact that the SR-IOV capability on the PF resides in extended
+ * config space and must be accessible and non-aliased to have enabled
+ * support for this VF. This is a micro performance optimization for
+ * systems supporting many VFs.
+ */
+ if (dev->is_virtfn)
+ return PCI_CFG_SPACE_EXP_SIZE;
#endif
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 445b51db75b0..fe7fe678965b 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -377,7 +377,7 @@ static int show_device(struct seq_file *m, void *v)
}
seq_putc(m, '\t');
if (drv)
- seq_printf(m, "%s", drv->name);
+ seq_puts(m, drv->name);
seq_putc(m, '\n');
return 0;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0f16acc323c6..208aacf39329 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4934,35 +4934,49 @@ static void quirk_fsl_no_msi(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
/*
- * GPUs with integrated HDA controller for streaming audio to attached displays
- * need a device link from the HDA controller (consumer) to the GPU (supplier)
- * so that the GPU is powered up whenever the HDA controller is accessed.
- * The GPU and HDA controller are functions 0 and 1 of the same PCI device.
- * The device link stays in place until shutdown (or removal of the PCI device
- * if it's hotplugged). Runtime PM is allowed by default on the HDA controller
- * to prevent it from permanently keeping the GPU awake.
+ * Although not allowed by the spec, some multi-function devices have
+ * dependencies of one function (consumer) on another (supplier). For the
+ * consumer to work in D0, the supplier must also be in D0. Create a
+ * device link from the consumer to the supplier to enforce this
+ * dependency. Runtime PM is allowed by default on the consumer to prevent
+ * it from permanently keeping the supplier awake.
*/
-static void quirk_gpu_hda(struct pci_dev *hda)
+static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
+ unsigned int supplier, unsigned int class,
+ unsigned int class_shift)
{
- struct pci_dev *gpu;
+ struct pci_dev *supplier_pdev;
- if (PCI_FUNC(hda->devfn) != 1)
+ if (PCI_FUNC(pdev->devfn) != consumer)
return;
- gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus),
- hda->bus->number,
- PCI_DEVFN(PCI_SLOT(hda->devfn), 0));
- if (!gpu || (gpu->class >> 16) != PCI_BASE_CLASS_DISPLAY) {
- pci_dev_put(gpu);
+ supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
+ if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
+ pci_dev_put(supplier_pdev);
return;
}
- if (!device_link_add(&hda->dev, &gpu->dev,
- DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
- pci_err(hda, "cannot link HDA to GPU %s\n", pci_name(gpu));
+ if (device_link_add(&pdev->dev, &supplier_pdev->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
+ pci_info(pdev, "D0 power state depends on %s\n",
+ pci_name(supplier_pdev));
+ else
+ pci_err(pdev, "Cannot enforce power dependency on %s\n",
+ pci_name(supplier_pdev));
+
+ pm_runtime_allow(&pdev->dev);
+ pci_dev_put(supplier_pdev);
+}
- pm_runtime_allow(&hda->dev);
- pci_dev_put(gpu);
+/*
+ * Create device link for GPUs with integrated HDA controller for streaming
+ * audio to attached displays.
+ */
+static void quirk_gpu_hda(struct pci_dev *hda)
+{
+ pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
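As a usage illustration, any future function-to-function power dependency can now be expressed in one line; a hypothetical quirk (the consumer function number is invented and no such quirk exists in this series):

static void quirk_example_consumer(struct pci_dev *consumer)
{
	/*
	 * Hypothetical: function 4 needs the display-class function 0
	 * to be in D0 in order to operate.
	 */
	pci_create_device_link(consumer, 4, 0, PCI_BASE_CLASS_DISPLAY, 16);
}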
@@ -4972,6 +4986,62 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
+ * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * controller to VGA.
+ */
+static void quirk_gpu_usb(struct pci_dev *usb)
+{
+ pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+
+/*
+ * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * to VGA. Currently there is no class code defined for a UCSI device over
+ * PCI, so we use the UNKNOWN class for now; this will be updated when UCSI
+ * over PCI gets a class code.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
+{
+ pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN, 8,
+ quirk_gpu_usb_typec_ucsi);
+
+/*
+ * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
+ * disabled. https://devtalk.nvidia.com/default/topic/1024022
+ */
+static void quirk_nvidia_hda(struct pci_dev *gpu)
+{
+ u8 hdr_type;
+ u32 val;
+
+ /* There was no integrated HDA controller before MCP89 */
+ if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
+ return;
+
+ /* Bit 25 at offset 0x488 enables the HDA controller */
+ pci_read_config_dword(gpu, 0x488, &val);
+ if (val & BIT(25))
+ return;
+
+ pci_info(gpu, "Enabling HDA controller\n");
+ pci_write_config_dword(gpu, 0x488, val | BIT(25));
+
+ /* The GPU becomes a multi-function device when the HDA is enabled */
+ pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
+ gpu->multifunction = !!(hdr_type & 0x80);
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
+
+/*
* Some IDT switches incorrectly flag an ACS Source Validation error on
* completions for config read requests even though PCIe r4.0, sec
* 6.12.1.1, says that completions are never affected by ACS Source
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 5c7922612733..7f4e65872b8d 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -236,10 +236,10 @@ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
}
EXPORT_SYMBOL(pci_get_domain_bus_and_slot);
-static int match_pci_dev_by_id(struct device *dev, void *data)
+static int match_pci_dev_by_id(struct device *dev, const void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_device_id *id = data;
+ const struct pci_device_id *id = data;
if (pci_match_one_device(id, pdev))
return 1;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0cdd5ff389de..79b1fa6519be 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1684,10 +1684,15 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
enum enable_type enable_local)
{
bool unassigned = false;
+ struct pci_host_bridge *host;
if (enable_local != undefined)
return enable_local;
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ return auto_disabled;
+
pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
if (unassigned)
return auto_enabled;
@@ -1861,16 +1866,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
available_mmio_pref);
/*
- * Calculate the total amount of extra resource space we can
- * pass to bridges below this one. This is basically the
- * extra space reduced by the minimal required space for the
- * non-hotplug bridges.
- */
- remaining_io = available_io;
- remaining_mmio = available_mmio;
- remaining_mmio_pref = available_mmio_pref;
-
- /*
* Calculate how many hotplug bridges and normal bridges there
* are on this bus. We will distribute the additional available
* resources between hotplug bridges.
@@ -1882,6 +1877,34 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
normal_bridges++;
}
+ /*
+ * There is only one bridge on the bus so it gets all available
+ * resources which it can then distribute to the possible hotplug
+ * bridges below.
+ */
+ if (hotplug_bridges + normal_bridges == 1) {
+ dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+ if (dev->subordinate) {
+ pci_bus_distribute_available_resources(dev->subordinate,
+ add_list, available_io, available_mmio,
+ available_mmio_pref);
+ }
+ return;
+ }
+
+ if (hotplug_bridges == 0)
+ return;
+
+ /*
+ * Calculate the total amount of extra resource space we can
+ * pass to bridges below this one. This is basically the
+ * extra space reduced by the minimal required space for the
+ * non-hotplug bridges.
+ */
+ remaining_io = available_io;
+ remaining_mmio = available_mmio;
+ remaining_mmio_pref = available_mmio_pref;
+
for_each_pci_bridge(dev, bus) {
const struct resource *res;
@@ -1906,21 +1929,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
/*
- * There is only one bridge on the bus so it gets all available
- * resources which it can then distribute to the possible hotplug
- * bridges below.
- */
- if (hotplug_bridges + normal_bridges == 1) {
- dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
- if (dev->subordinate) {
- pci_bus_distribute_available_resources(dev->subordinate,
- add_list, available_io, available_mmio,
- available_mmio_pref);
- }
- return;
- }
-
- /*
* Go over devices on this bus and distribute the remaining
* resource space between hotplug bridges.
*/
@@ -1936,8 +1944,6 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* Distribute available extra resources equally between
* hotplug-capable downstream ports taking alignment into
* account.
- *
- * Here hotplug_bridges is always != 0.
*/
align = pci_resource_alignment(bridge, io_res);
io = div64_ul(available_io, hotplug_bridges);
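A hedged numeric illustration of the per-bridge split above (numbers invented):

/*
 * With hotplug_bridges == 4 and, say, 64 MB of spare MMIO (or,
 * analogously, 8 KB of spare I/O space), each hotplug bridge is
 * offered 16 MB (2 KB), then rounded to the alignment of its bridge
 * window via pci_resource_alignment() so the assignments stay aligned.
 */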
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index f4d92b1afe7b..ae4aa0e1f2f4 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -75,6 +75,7 @@ static const char *pci_bus_speed_strings[] = {
"5.0 GT/s PCIe", /* 0x15 */
"8.0 GT/s PCIe", /* 0x16 */
"16.0 GT/s PCIe", /* 0x17 */
+ "32.0 GT/s PCIe", /* 0x18 */
};
static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig
index aee28a5bb98f..d370f4ce0492 100644
--- a/drivers/pci/switch/Kconfig
+++ b/drivers/pci/switch/Kconfig
@@ -9,7 +9,7 @@ config PCI_SW_SWITCHTEC
Enables support for the management interface for the MicroSemi
Switchtec series of PCIe switches. Supports userspace access
to submit MRPC commands to the switch via /dev/switchtecX
- devices. See <file:Documentation/switchtec.txt> for more
+ devices. See <file:Documentation/driver-api/switchtec.rst> for more
information.
endmenu