author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2022-08-02 10:06:12 -0700
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2022-08-02 10:06:12 -0700
commit    8bb5e7f4dcd9b9ef22a3ea25c9066a8a968f12dd
tree      0f1383880607a227142f9388a066959926233ff1
parent    Input: document the units for resolution of size axes
parent    Input: adc-joystick - fix ordering in adc_joystick_probe()
Merge branch 'next' into for-linus
Prepare input updates for 5.20 (or 6.0) merge window.
Diffstat (limited to 'drivers/pci/controller')
-rw-r--r--  drivers/pci/controller/Kconfig | 4
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 3
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 21
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 10
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 7
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 42
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 10
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 119
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c | 57
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 3
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c | 91
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 123
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 9
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c | 142
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 2
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 350
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 489
-rw-r--r--  drivers/pci/controller/pci-loongson.c | 2
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 511
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 2
-rw-r--r--  drivers/pci/controller/pci-versatile.c | 3
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 36
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 2
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 257
-rw-r--r--  drivers/pci/controller/pcie-iproc-bcma.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 11
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 10
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 1
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 18
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 132
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 3
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 1
-rw-r--r--  drivers/pci/controller/vmd.c | 7
36 files changed, 1663 insertions(+), 843 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 601f2531ee91..b8d96d38064d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -10,6 +10,10 @@ config PCI_MVEBU
depends on ARM
depends on OF
select PCI_BRIDGE_EMUL
+ help
+ Add support for Marvell EBU PCIe controller. This PCIe controller
+ is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
+ Armada XP, Armada 375, Armada 38x and Armada 39x.
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 768d33f9ebc8..a82f845cc4b5 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -69,6 +69,7 @@ struct j721e_pcie_data {
enum j721e_pcie_mode mode;
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
};
@@ -307,6 +308,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .quirk_disable_flr = true,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
@@ -405,6 +407,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ ep->quirk_disable_flr = data->quirk_disable_flr;
cdns_pcie = &ep->pcie;
cdns_pcie->dev = dev;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 88e05b9c2e5b..b8b655d4047e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -187,8 +187,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
@@ -565,7 +564,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
- int ret;
+ int max_epfs = sizeof(epc->function_num_map) * 8;
+ int ret, value, epf;
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
@@ -573,6 +573,21 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ if (ep->quirk_disable_flr) {
+ for (epf = 0; epf < max_epfs; epf++) {
+ if (!(epc->function_num_map & BIT(epf)))
+ continue;
+
+ value = cdns_pcie_ep_fn_readl(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP);
+ value &= ~PCI_EXP_DEVCAP_FLR;
+ cdns_pcie_ep_fn_writel(pcie, epf,
+ CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
+ PCI_EXP_DEVCAP, value);
+ }
+ }
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index fb96d37a135c..940c7dd701d6 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -123,6 +123,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -501,6 +509,8 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (rc->quirk_detect_quiet_flag)
cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+ cdns_pcie_host_enable_ptm_response(pcie);
+
ret = cdns_pcie_start_link(pcie);
if (ret) {
dev_err(dev, "Failed to start link\n");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index c8a27b6290ce..190786e47df9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -116,6 +116,10 @@
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
(((aperture) - 2) << ((bar) * 8))
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
+
/*
* Endpoint Function Registers (PCI configuration space for endpoint functions)
*/
@@ -123,6 +127,7 @@
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
/*
@@ -357,6 +362,7 @@ struct cdns_pcie_epf {
* minimize time between read and write
* @epf: Structure to hold info about endpoint function
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -372,6 +378,7 @@ struct cdns_pcie_ep {
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6974bd5aa116..7a285fb0f619 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -408,6 +408,11 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
dev_err(dev, "failed to disable vpcie regulator: %d\n",
ret);
}
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio))
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
}
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
@@ -453,10 +458,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX7D:
break;
case IMX8MM:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret)
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
case IMX8MQ:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
@@ -544,15 +545,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
/* allow the clocks to stabilize */
usleep_range(200, 500);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
- msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
- }
-
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -599,6 +591,15 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
}
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ msleep(100);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
return;
err_ref_clk:
@@ -809,9 +810,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
+ dw_pcie_wait_for_link(pci);
if (pci->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
@@ -847,11 +846,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
}
/* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
+ dw_pcie_wait_for_link(pci);
} else {
dev_info(dev, "Link: Gen2 disabled\n");
}
@@ -923,6 +918,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
/* Others poke directly at IOMUXC registers */
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
+ case IMX6QP:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_PM_TURN_OFF,
IMX6SX_GPR12_PCIE_PM_TURN_OFF);
@@ -983,6 +979,7 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
case IMX8MM:
if (phy_power_off(imx6_pcie->phy))
dev_err(dev, "unable to power off PHY\n");
+ phy_exit(imx6_pcie->phy);
break;
default:
break;
@@ -1252,7 +1249,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+ IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
},
[IMX7D] = {
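The two imx6 hunks above move the PERST# pulse so that reset is asserted before the clocks are brought up and released only afterwards. A condensed sketch of the resulting sequence, written with the descriptor-based GPIO API for clarity ("perst" is a hypothetical gpio_desc; the driver itself uses the legacy gpio_* calls):

	gpiod_set_value_cansleep(perst, 1);	/* assert PERST# before enabling clocks */
	/* ... enable reference clocks, let them stabilize ... */
	msleep(100);				/* keep reset asserted while power/clocks settle */
	gpiod_set_value_cansleep(perst, 0);	/* deassert PERST# */
	msleep(100);				/* PCIe r5.0, 6.6.1: wait before config requests */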
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 1c2ee4e13f1c..d10e5fd0f83c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -531,13 +531,13 @@ static void ks_pcie_quirk(struct pci_dev *dev)
struct pci_dev *bridge;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
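The keystone hunk only swaps in a constant: PCI_CLASS_BRIDGE_PCI_NORMAL folds the "<< 8" prog-if shift into the value itself. Values reproduced here for reference, from include/linux/pci_ids.h:

	#define PCI_CLASS_BRIDGE_PCI		0x0604		/* base class + sub-class */
	#define PCI_CLASS_BRIDGE_PCI_NORMAL	0x060400	/* ... plus the 8-bit prog-if */

so (PCI_CLASS_BRIDGE_PCI << 8) == PCI_CLASS_BRIDGE_PCI_NORMAL, and the same substitution repeats in the meson, mobiveil, qcom and aardvark hunks below.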
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 686ded034f22..f44bf347904a 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -313,14 +313,14 @@ static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
* cannot program the PCI_CLASS_DEVICE register, so we must fabricate
* the return value in the config accessors.
*/
- if (where == PCI_CLASS_REVISION && size == 4)
- *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
- else if (where == PCI_CLASS_DEVICE && size == 2)
- *val = PCI_CLASS_BRIDGE_PCI;
- else if (where == PCI_CLASS_DEVICE && size == 1)
- *val = PCI_CLASS_BRIDGE_PCI & 0xff;
- else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
- *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+ if ((where & ~3) == PCI_CLASS_REVISION) {
+ if (size <= 2)
+ *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+ *val &= ~0xffffff00;
+ *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ }
return PCIBIOS_SUCCESSFUL;
}
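The rewritten meson accessor widens any sub-dword read into a full 32-bit view of the PCI_CLASS_REVISION dword, patches in the bridge class, then narrows back to the requested size. A worked example for a 1-byte read at PCI_CLASS_DEVICE + 1 (offset 0x0b, the base-class byte):

	/* where = 0x0b, size = 1, so (where & ~3) == PCI_CLASS_REVISION (0x08) */
	*val = (*val & 0xff) << 24;			/* widen: byte into lane 3 */
	*val &= ~0xffffff00;				/* drop the stale class/prog-if bits */
	*val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;	/* dword is now 0x060400 << 8 | rev */
	*val = (*val >> 24) & 0xff;			/* narrow: returns 0x06, the base class */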
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index f4755f3a03be..9979302532b7 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -362,6 +362,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
} else if (pp->has_msi_ctrl) {
+ u32 ctrl, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
if (!pp->msi_irq) {
pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
if (pp->msi_irq < 0) {
@@ -390,7 +396,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
sizeof(pp->msi_msg),
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ ret = dma_mapping_error(pci->dev, pp->msi_data);
+ if (ret) {
dev_err(pci->dev, "Failed to map MSI data\n");
pp->msi_data = 0;
goto err_free_msi;
@@ -541,7 +548,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- pp->irq_mask[ctrl] = ~0;
dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
pp->irq_mask[ctrl]);
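Moving the irq_mask[] initialization from dw_pcie_setup_rc() into dw_pcie_host_init() means a repeated dw_pcie_setup_rc() call (e.g. on resume) re-programs whatever mask state has accumulated instead of resetting everything to masked. Sizing sketch, assuming the usual constants from pcie-designware.h (MAX_MSI_IRQS 256, MAX_MSI_IRQS_PER_CTRL 32):

	u32 ctrl, num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* e.g. num_vectors == 256 -> 8 mask words, one per MSI controller block */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
		pp->irq_mask[ctrl] = ~0;	/* start fully masked */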
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c9b341e55cbb..8c5bb9d7cc36 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -10,9 +10,12 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -26,6 +29,7 @@
*/
#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
@@ -36,10 +40,12 @@
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
#define PCIE_CLIENT_GENERAL_DEBUG 0x104
-#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
@@ -51,6 +57,7 @@ struct rockchip_pcie {
struct reset_control *rst;
struct gpio_desc *rst_gpio;
struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
};
static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
@@ -65,6 +72,78 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
writel_relaxed(val, rockchip->apb_base + reg);
}
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
@@ -111,7 +190,20 @@ static int rockchip_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0)
+ dev_err(dev, "failed to init irq domain\n");
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ rockchip);
/* LTSSM enable control mode */
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
@@ -152,6 +244,11 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
if (IS_ERR(rockchip->rst_gpio))
return PTR_ERR(rockchip->rst_gpio);
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
return 0;
}
@@ -182,18 +279,6 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
phy_power_off(rockchip->phy);
}
-static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
-{
- struct device *dev = rockchip->pci.dev;
-
- rockchip->rst = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(rockchip->rst))
- return dev_err_probe(dev, PTR_ERR(rockchip->rst),
- "failed to get reset lines\n");
-
- return reset_control_deassert(rockchip->rst);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
@@ -222,6 +307,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
/* DON'T MOVE ME: must be enable before PHY init */
rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
if (IS_ERR(rockchip->vpcie3v3)) {
@@ -241,7 +330,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto disable_regulator;
- ret = rockchip_pcie_reset_control_release(rockchip);
+ ret = reset_control_deassert(rockchip->rst);
if (ret)
goto deinit_phy;
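The new INTx handling relies on the Rockchip "high halfword write-enable" scheme declared at the top of the file: bits [31:16] of the written word select which of bits [15:0] actually change, so a single mask bit can be flipped without a read-modify-write. Worked example using the macros from the hunk above:

	/* mask INTB (hwirq 1): enable writes to bit 1 and set it */
	u32 v = HIWORD_UPDATE_BIT(BIT(1));	/* (0x2 << 16) | 0x2 == 0x00020002 */
	rockchip_pcie_writel_apb(rockchip, v, PCIE_CLIENT_INTR_MASK_LEGACY);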
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 00cde9a248b5..02cc70d8cc06 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -181,10 +181,59 @@ static int fu740_pcie_start_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
struct fu740_pcie *afp = dev_get_drvdata(dev);
+ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int ret;
+ u32 orig, tmp;
+
+ /*
+ * Force 2.5GT/s when starting the link, due to some devices not
+ * probing at higher speeds. This happens with the PCIe switch
+ * on the Unmatched board when U-Boot has not initialised the PCIe.
+ * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
+ * by the soft reset done by this driver.
+ */
+ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ orig = tmp & PCI_EXP_LNKCAP_SLS;
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
/* Enable LTSSM */
writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
- return 0;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start\n");
+ goto err;
+ }
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+ dev_dbg(dev, "changing speed back to original\n");
+
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= orig;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start at new speed\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ WARN_ON(ret); /* we assume that errors will be very rare */
+ dw_pcie_dbi_ro_wr_dis(pci);
+ return ret;
}
static int fu740_pcie_host_init(struct pcie_port *pp)
@@ -224,7 +273,7 @@ static int fu740_pcie_host_init(struct pcie_port *pp)
/* Clear hold_phy_rst */
writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
/* Enable pcieauxclk */
- ret = clk_prepare_enable(afp->pcie_aux);
+ clk_prepare_enable(afp->pcie_aux);
/* Set RC mode */
writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
@@ -259,11 +308,11 @@ static int fu740_pcie_probe(struct platform_device *pdev)
return PTR_ERR(afp->mgmt_base);
/* Fetch GPIOs */
- afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(afp->reset))
return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
- afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
if (IS_ERR(afp->pwren))
return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
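For reference, the LNKCAP speed field the fu740 hunk clamps and restores is the standard PCI_EXP_LNKCAP_SLS encoding (values from include/uapi/linux/pci_regs.h):

	#define PCI_EXP_LNKCAP_SLS		0x0000000f	/* Supported Link Speeds */
	#define PCI_EXP_LNKCAP_SLS_2_5GB	0x1		/* Gen1 - forced at link start */
	#define PCI_EXP_LNKCAP_SLS_5_0GB	0x2		/* Gen2 */
	#define PCI_EXP_LNKCAP_SLS_8_0GB	0x3		/* Gen3 - the FU740 maximum */

Once the link has trained at 2.5GT/s, restoring the original SLS value and setting PORT_LOGIC_SPEED_CHANGE tells the DesignWare core to retrain at the higher speed.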
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index c625fc6bb287..a52cad269f85 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -332,9 +332,6 @@ static int hi3660_pcie_phy_init(struct platform_device *pdev,
pcie->phy_priv = phy;
phy->dev = dev;
- /* registers */
- pdev = container_of(dev, struct platform_device, dev);
-
ret = hi3660_pcie_phy_get_clk(phy);
if (ret)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 6ce8eddf3a37..ec99116ad05c 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -223,11 +223,8 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
-static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
- u32 val, offset;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
@@ -247,6 +244,38 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
if (ret)
goto err_phy_exit;
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -335,7 +364,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
/*
@@ -355,13 +384,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -376,10 +400,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
return;
}
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -643,43 +664,26 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
return ret;
-
- ret = qcom_pcie_ep_core_reset(pcie_ep);
- if (ret)
- goto err_disable_clk;
-
- ret = phy_init(pcie_ep->phy);
- if (ret)
- goto err_disable_clk;
-
- /* PHY needs to be powered on for dw_pcie_ep_init() */
- ret = phy_power_on(pcie_ep->phy);
- if (ret)
- goto err_phy_exit;
+ }
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_phy_power_off;
+ goto err_disable_resources;
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -691,10 +695,7 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
return 0;
}
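The qcom-ep refactor replaces four copies of the same unwind ladder with one enable/disable helper pair, where release order is the exact reverse of acquire. Condensed sketch of the pattern (abbreviated from the hunks above):

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
				      qcom_pcie_ep_clks);
	if (ret)
		return ret;
	ret = phy_init(pcie_ep->phy);
	if (ret)
		goto err_disable_clk;
	ret = phy_power_on(pcie_ep->phy);
	if (ret)
		goto err_phy_exit;
	return 0;

err_phy_exit:
	phy_exit(pcie_ep->phy);
err_disable_clk:
	clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
				   qcom_pcie_ep_clks);
	return ret;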
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index c19cd506ed3f..2ea13750b492 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -161,7 +161,7 @@ struct qcom_pcie_resources_2_3_3 {
/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[7];
+ struct clk_bulk_data clks[9];
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
@@ -195,6 +195,10 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
unsigned int pipe_clk_need_muxing:1;
+ unsigned int has_tbu_clk:1;
+ unsigned int has_ddrss_sf_tbu_clk:1;
+ unsigned int has_aggre0_clk:1;
+ unsigned int has_aggre1_clk:1;
};
struct qcom_pcie {
@@ -204,8 +208,7 @@ struct qcom_pcie {
union qcom_pcie_resources res;
struct phy *phy;
struct gpio_desc *reset;
- const struct qcom_pcie_ops *ops;
- unsigned int pipe_clk_need_muxing:1;
+ const struct qcom_pcie_cfg *cfg;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -229,8 +232,8 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
/* Enable Link Training state machine */
- if (pcie->ops->ltssm_enable)
- pcie->ops->ltssm_enable(pcie);
+ if (pcie->cfg->ops->ltssm_enable)
+ pcie->cfg->ops->ltssm_enable(pcie);
return 0;
}
@@ -1146,6 +1149,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ unsigned int idx;
int ret;
res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
@@ -1159,24 +1163,28 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "cfg";
- res->clks[2].id = "bus_master";
- res->clks[3].id = "bus_slave";
- res->clks[4].id = "slave_q2a";
- res->clks[5].id = "tbu";
- if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) {
- res->clks[6].id = "ddrss_sf_tbu";
- res->num_clks = 7;
- } else {
- res->num_clks = 6;
- }
+ idx = 0;
+ res->clks[idx++].id = "aux";
+ res->clks[idx++].id = "cfg";
+ res->clks[idx++].id = "bus_master";
+ res->clks[idx++].id = "bus_slave";
+ res->clks[idx++].id = "slave_q2a";
+ if (pcie->cfg->has_tbu_clk)
+ res->clks[idx++].id = "tbu";
+ if (pcie->cfg->has_ddrss_sf_tbu_clk)
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ if (pcie->cfg->has_aggre0_clk)
+ res->clks[idx++].id = "aggre0";
+ if (pcie->cfg->has_aggre1_clk)
+ res->clks[idx++].id = "aggre1";
+
+ res->num_clks = idx;
ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
if (ret < 0)
return ret;
- if (pcie->pipe_clk_need_muxing) {
+ if (pcie->cfg->pipe_clk_need_muxing) {
res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
if (IS_ERR(res->pipe_clk_src))
return PTR_ERR(res->pipe_clk_src);
@@ -1209,7 +1217,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
}
/* Set TCXO as clock source for pcie_pipe_clk_src */
- if (pcie->pipe_clk_need_muxing)
+ if (pcie->cfg->pipe_clk_need_muxing)
clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
@@ -1230,11 +1238,8 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
goto err_disable_clocks;
}
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- goto err_disable_clocks;
- }
+ /* Wait for reset to complete, required on SM8450 */
+ usleep_range(1000, 1500);
/* configure PCIe to RC mode */
writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
@@ -1284,7 +1289,7 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
/* Set pipe clock as clock source for pcie_pipe_clk_src */
- if (pcie->pipe_clk_need_muxing)
+ if (pcie->cfg->pipe_clk_need_muxing)
clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
return clk_prepare_enable(res->pipe_clk);
@@ -1384,7 +1389,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
qcom_ep_reset_assert(pcie);
- ret = pcie->ops->init(pcie);
+ ret = pcie->cfg->ops->init(pcie);
if (ret)
return ret;
@@ -1392,16 +1397,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_deinit;
- if (pcie->ops->post_init) {
- ret = pcie->ops->post_init(pcie);
+ if (pcie->cfg->ops->post_init) {
+ ret = pcie->cfg->ops->post_init(pcie);
if (ret)
goto err_disable_phy;
}
qcom_ep_reset_deassert(pcie);
- if (pcie->ops->config_sid) {
- ret = pcie->ops->config_sid(pcie);
+ if (pcie->cfg->ops->config_sid) {
+ ret = pcie->cfg->ops->config_sid(pcie);
if (ret)
goto err;
}
@@ -1410,12 +1415,12 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
err:
qcom_ep_reset_assert(pcie);
- if (pcie->ops->post_deinit)
- pcie->ops->post_deinit(pcie);
+ if (pcie->cfg->ops->post_deinit)
+ pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
- pcie->ops->deinit(pcie);
+ pcie->cfg->ops->deinit(pcie);
return ret;
}
@@ -1509,17 +1514,48 @@ static const struct qcom_pcie_cfg ipq4019_cfg = {
static const struct qcom_pcie_cfg sdm845_cfg = {
.ops = &ops_2_7_0,
+ .has_tbu_clk = true,
+};
+
+static const struct qcom_pcie_cfg sm8150_cfg = {
+ /* sm8150 has qcom IP rev 1.5.0. However 1.5.0 ops are same as
+ * 1.9.0, so reuse the same.
+ */
+ .ops = &ops_1_9_0,
};
static const struct qcom_pcie_cfg sm8250_cfg = {
.ops = &ops_1_9_0,
+ .has_tbu_clk = true,
+ .has_ddrss_sf_tbu_clk = true,
+};
+
+static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
+ .ops = &ops_1_9_0,
+ .has_ddrss_sf_tbu_clk = true,
+ .pipe_clk_need_muxing = true,
+ .has_aggre0_clk = true,
+ .has_aggre1_clk = true,
+};
+
+static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
+ .ops = &ops_1_9_0,
+ .has_ddrss_sf_tbu_clk = true,
+ .pipe_clk_need_muxing = true,
+ .has_aggre1_clk = true,
};
static const struct qcom_pcie_cfg sc7280_cfg = {
.ops = &ops_1_9_0,
+ .has_tbu_clk = true,
.pipe_clk_need_muxing = true,
};
+static const struct qcom_pcie_cfg sc8180x_cfg = {
+ .ops = &ops_1_9_0,
+ .has_tbu_clk = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1559,8 +1595,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
- pcie->ops = pcie_cfg->ops;
- pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing;
+ pcie->cfg = pcie_cfg;
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
if (IS_ERR(pcie->reset)) {
@@ -1586,29 +1621,28 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- ret = pcie->ops->get_resources(pcie);
+ ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
goto err_pm_runtime_put;
pp->ops = &qcom_pcie_dw_ops;
ret = phy_init(pcie->phy);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
+ if (ret)
goto err_pm_runtime_put;
- }
platform_set_drvdata(pdev, pcie);
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ goto err_phy_exit;
}
return 0;
+err_phy_exit:
+ phy_exit(pcie->phy);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1626,15 +1660,18 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
+ { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg },
+ { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
{ }
};
static void qcom_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
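Why qcom_pcie_resources_2_7_0 grows to clks[9]: the flag-built list in qcom_pcie_get_resources_2_7_0() is five common clocks plus up to four optional ones, nine entries worst case:

	/* 5 common:     aux, cfg, bus_master, bus_slave, slave_q2a
	 * + 4 flag-gated: tbu, ddrss_sf_tbu, aggre0, aggre1
	 * = 9 entries worst case, hence clks[9]; res->num_clks == idx after the chain
	 */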
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index b1b5f836a806..cc2678490162 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -186,8 +186,6 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
-
#define GEN3_EQ_CONTROL_OFF 0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
@@ -2189,9 +2187,6 @@ static int tegra194_pcie_suspend_noirq(struct device *dev)
if (!pcie->link_state)
return 0;
- /* Save MSI interrupt vector */
- pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
- PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
@@ -2223,10 +2218,6 @@ static int tegra194_pcie_resume_noirq(struct device *dev)
if (ret < 0)
goto fail_host_init;
- /* Restore MSI interrupt vector */
- dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
- pcie->msi_ctrl_int);
-
return 0;
fail_host_init:
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 69810c6b0d58..4d0a587c0ba5 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -31,6 +32,17 @@
#define PCL_RSTCTRL2 0x0024
#define PCL_RSTCTRL_PHY_RESET BIT(0)
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
#define PCL_MODE 0x8000
#define PCL_MODE_REGEN BIT(8)
#define PCL_MODE_REGVAL BIT(0)
@@ -51,6 +63,9 @@
#define PCL_APP_INTX 0x8074
#define PCL_APP_INTX_SYS_INT BIT(0)
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
/* assertion time of INTx in usec */
#define PCL_INTX_WIDTH_USEC 30
@@ -60,7 +75,14 @@ struct uniphier_pcie_ep_priv {
struct clk *clk, *clk_gio;
struct reset_control *rst, *rst_gio;
struct phy *phy;
- const struct pci_epc_features *features;
+ const struct uniphier_pcie_ep_soc_data *data;
+};
+
+struct uniphier_pcie_ep_soc_data {
+ bool has_gio;
+ void (*init)(struct uniphier_pcie_ep_priv *priv);
+ int (*wait)(struct uniphier_pcie_ep_priv *priv);
+ const struct pci_epc_features features;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
@@ -91,7 +113,7 @@ static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
writel(val, priv->base + PCL_RSTCTRL2);
}
-static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
+static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv)
{
u32 val;
@@ -116,6 +138,55 @@ static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
msleep(100);
}
+static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(priv->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, priv->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 status;
+ int ret;
+
+ /* wait PIPE clock */
+ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(priv->pci.dev,
+ "Failed to initialize controller in EP mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -209,7 +280,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
- return priv->features;
+ return &priv->data->features;
}
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
@@ -238,7 +309,8 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
if (ret)
goto out_rst_assert;
- uniphier_pcie_init_ep(priv);
+ if (priv->data->init)
+ priv->data->init(priv);
uniphier_pcie_phy_reset(priv, true);
@@ -248,8 +320,16 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
uniphier_pcie_phy_reset(priv, false);
+ if (priv->data->wait) {
+ ret = priv->data->wait(priv);
+ if (ret)
+ goto out_phy_exit;
+ }
+
return 0;
+out_phy_exit:
+ phy_exit(priv->phy);
out_rst_gio_assert:
reset_control_assert(priv->rst_gio);
out_rst_assert:
@@ -277,8 +357,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->features = of_device_get_match_data(dev);
- if (WARN_ON(!priv->features))
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
return -EINVAL;
priv->pci.dev = dev;
@@ -288,13 +368,15 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk_gio = devm_clk_get(dev, "gio");
- if (IS_ERR(priv->clk_gio))
- return PTR_ERR(priv->clk_gio);
+ if (priv->data->has_gio) {
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
- priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
- if (IS_ERR(priv->rst_gio))
- return PTR_ERR(priv->rst_gio);
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+ }
priv->clk = devm_clk_get(dev, "link");
if (IS_ERR(priv->clk))
@@ -321,13 +403,31 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return dw_pcie_ep_init(&priv->pci.ep);
}
-static const struct pci_epc_features uniphier_pro5_data = {
- .linkup_notifier = false,
- .msi_capable = true,
- .msix_capable = false,
- .align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
+ .has_gio = true,
+ .init = uniphier_pcie_pro5_init_ep,
+ .wait = NULL,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+ },
+};
+
+static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
+ .has_gio = false,
+ .init = uniphier_pcie_nx1_init_ep,
+ .wait = uniphier_pcie_nx1_wait_ep,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 12,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ },
};
static const struct of_device_id uniphier_pcie_ep_match[] = {
@@ -335,6 +435,10 @@ static const struct of_device_id uniphier_pcie_ep_match[] = {
.compatible = "socionext,uniphier-pro5-pcie-ep",
.data = &uniphier_pro5_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-ep",
+ .data = &uniphier_nx1_data,
+ },
{ /* sentinel */ },
};
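The NX1 wait hook uses the generic iopoll helper, whose signature is readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h>. The call in the hunk above therefore reads:

	/* re-read PCL_PIPEMON into status every 100 ms until the PIPE clock
	 * reports alive, giving up with -ETIMEDOUT after 1 s */
	ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
				 status & PCL_PCLK_ALIVE, 100000, 1000000);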
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index f3547aa60140..31a7bdebe540 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -295,7 +295,7 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
/* fixup for PCIe class register */
value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
- value |= (PCI_CLASS_BRIDGE_PCI << 16);
+ value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
return 0;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 4f5b44827d21..ffec82c8a523 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -38,10 +38,6 @@
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
@@ -102,6 +98,10 @@
#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
+#define PCIE_ISR0_CORR_ERR BIT(11)
+#define PCIE_ISR0_NFAT_ERR BIT(12)
+#define PCIE_ISR0_FAT_ERR BIT(13)
+#define PCIE_ISR0_ERR_MASK GENMASK(13, 11)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
@@ -272,17 +272,15 @@ struct advk_pcie {
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
+ struct irq_domain *rp_irq_domain;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
- struct irq_chip msi_bottom_irq_chip;
- struct irq_chip msi_irq_chip;
- struct msi_domain_info msi_domain_info;
+ raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
struct mutex msi_used_lock;
- u16 msi_msg;
int link_gen;
struct pci_bridge_emul bridge;
struct gpio_desc *reset_gpio;
@@ -477,6 +475,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
+ phys_addr_t msi_addr;
u32 reg;
int i;
@@ -529,7 +528,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
*/
reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
reg &= ~0xffffff00;
- reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
/* Disable Root Bridge I/O space, memory space and bus mastering */
@@ -565,6 +564,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ /* Set MSI address */
+ msi_addr = virt_to_phys(pcie);
+ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
+
/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -576,15 +580,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
- /* Disable All ISR0/1 Sources */
- reg = PCIE_ISR0_ALL_MASK;
+ /* Disable All ISR0/1 and MSI Sources */
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+
+ /* Unmask summary MSI interrupt */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
reg &= ~PCIE_ISR0_MSI_INT_PENDING;
advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
- advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
-
- /* Unmask all MSIs */
- advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+ /* Unmask PME interrupt for processing of PME requester */
+ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ reg &= ~PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -778,11 +787,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
case PCI_INTERRUPT_LINE: {
/*
* From the whole 32bit register we support reading from HW only
- * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
* Other bits are retrieved only from emulated config buffer.
*/
__le32 *cfgspace = (__le32 *)&bridge->conf;
u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
+ val &= ~(PCI_BRIDGE_CTL_SERR << 16);
+ else
+ val |= PCI_BRIDGE_CTL_SERR << 16;
if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
else
@@ -808,6 +821,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_INTERRUPT_LINE:
+ /*
+ * According to Figure 6-3: Pseudo Logic Diagram for Error
+ * Message Controls in PCIe base specification, SERR# Enable bit
+ * in Bridge Control register enable receiving of ERR_* messages
+ */
+ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ if (new & (PCI_BRIDGE_CTL_SERR << 16))
+ val &= ~PCIE_ISR0_ERR_MASK;
+ else
+ val |= PCIE_ISR0_ERR_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ }
if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
@@ -835,20 +861,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
*value = PCI_EXP_SLTSTA_PDS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
- case PCI_EXP_RTCTL: {
- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
- *value |= PCI_EXP_RTCAP_CRSVIS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
- }
-
- case PCI_EXP_RTSTA: {
- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
- *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
- return PCI_BRIDGE_EMUL_HANDLED;
- }
+ /*
+ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
+ * to be handled here, because their values are stored in emulated
+ * config space buffer, and we read them from there when needed.
+ */
case PCI_EXP_LNKCAP: {
u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
@@ -903,19 +920,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_RTCTL: {
- /* Only mask/unmask PME interrupt */
- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
- ~PCIE_MSG_PM_PME_MASK;
- if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
- val |= PCIE_MSG_PM_PME_MASK;
- advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
+ /* Only emulation of PMEIE and CRSSVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
- case PCI_EXP_RTSTA:
- new = (new & PCI_EXP_RTSTA_PME) >> 9;
- advk_writel(pcie, new, PCIE_ISR0_REG);
- break;
+ /*
+ * PCI_EXP_RTSTA is also supported, but does not need to be handled
+ * here, because its value is stored in emulated config space buffer,
+ * and we write it there when needed.
+ */
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVCTL2:
@@ -928,7 +944,7 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_base = advk_pci_bridge_emul_base_conf_read,
.write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
@@ -959,7 +975,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
/* Support interrupt A for MSI feature */
- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+ bridge->conf.intpin = PCI_INTERRUPT_INTA;
/* Aardvark HW provides PCIe Capability structure in version 2 */
bridge->pcie_conf.cap = cpu_to_le16(2);
@@ -981,8 +997,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
return false;
/*
- * If the link goes down after we check for link-up, nothing bad
- * happens but the config access times out.
+ * If the link goes down after we check for link-up, we have a problem:
+ * if a PIO request is executed while link-down, the whole controller
+ * gets stuck in a non-functional state, and even after link comes up
+ * again, PIO requests won't work anymore, and a reset of the whole PCIe
+ * controller is needed. Therefore we need to prevent sending PIO
+ * requests while the link is down.
*/
if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
return false;
@@ -1180,11 +1200,11 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
struct msi_msg *msg)
{
struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
+ phys_addr_t msi_addr = virt_to_phys(pcie);
- msg->address_lo = lower_32_bits(msi_msg);
- msg->address_hi = upper_32_bits(msi_msg);
- msg->data = data->irq;
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
}
static int advk_msi_set_affinity(struct irq_data *irq_data,
@@ -1193,6 +1213,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data,
return -EINVAL;
}
+static void advk_msi_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask |= BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+ mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ mask &= ~BIT(hwirq);
+ advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void advk_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip advk_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
+ .irq_set_affinity = advk_msi_set_affinity,
+ .irq_mask = advk_msi_irq_mask,
+ .irq_unmask = advk_msi_irq_unmask,
+};
+
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs, void *args)
@@ -1201,19 +1269,15 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
int hwirq, i;
mutex_lock(&pcie->msi_used_lock);
- hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
- 0, nr_irqs, 0);
- if (hwirq >= MSI_IRQ_NUM) {
- mutex_unlock(&pcie->msi_used_lock);
- return -ENOSPC;
- }
-
- bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+ hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+ order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
+ if (hwirq < 0)
+ return -ENOSPC;
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i,
- &pcie->msi_bottom_irq_chip,
+ &advk_msi_bottom_irq_chip,
domain->host_data, handle_simple_irq,
NULL, NULL);
@@ -1227,7 +1291,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain,
struct advk_pcie *pcie = domain->host_data;
mutex_lock(&pcie->msi_used_lock);
- bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+ bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&pcie->msi_used_lock);
}
@@ -1269,7 +1333,6 @@ static int advk_pcie_irq_map(struct irq_domain *h,
{
struct advk_pcie *pcie = h->host_data;
- advk_pcie_irq_mask(irq_get_irq_data(virq));
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &pcie->irq_chip,
handle_level_irq);
@@ -1283,37 +1346,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static struct irq_chip advk_msi_irq_chip = {
+ .name = "advk-MSI",
+ .irq_mask = advk_msi_top_irq_mask,
+ .irq_unmask = advk_msi_top_irq_unmask,
+};
+
+static struct msi_domain_info advk_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ .chip = &advk_msi_irq_chip,
+};
+
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
- struct irq_chip *bottom_ic, *msi_ic;
- struct msi_domain_info *msi_di;
- phys_addr_t msi_msg_phys;
+ raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- bottom_ic = &pcie->msi_bottom_irq_chip;
-
- bottom_ic->name = "MSI";
- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
- bottom_ic->irq_set_affinity = advk_msi_set_affinity;
-
- msi_ic = &pcie->msi_irq_chip;
- msi_ic->name = "advk-MSI";
-
- msi_di = &pcie->msi_domain_info;
- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI;
- msi_di->chip = msi_ic;
-
- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
- advk_writel(pcie, lower_32_bits(msi_msg_phys),
- PCIE_MSI_ADDR_LOW_REG);
- advk_writel(pcie, upper_32_bits(msi_msg_phys),
- PCIE_MSI_ADDR_HIGH_REG);
-
pcie->msi_inner_domain =
irq_domain_add_linear(NULL, MSI_IRQ_NUM,
&advk_msi_domain_ops, pcie);
@@ -1321,8 +1372,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
return -ENOMEM;
pcie->msi_domain =
- pci_msi_create_irq_domain(of_node_to_fwnode(node),
- msi_di, pcie->msi_inner_domain);
+ pci_msi_create_irq_domain(dev_fwnode(dev),
+ &advk_msi_domain_info,
+ pcie->msi_inner_domain);
if (!pcie->msi_domain) {
irq_domain_remove(pcie->msi_inner_domain);
return -ENOMEM;
@@ -1363,7 +1415,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
}
irq_chip->irq_mask = advk_pcie_irq_mask;
- irq_chip->irq_mask_ack = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
pcie->irq_domain =
@@ -1385,10 +1436,73 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
irq_domain_remove(pcie->irq_domain);
}
+static struct irq_chip advk_rp_irq_chip = {
+ .name = "advk-RP",
+};
+
+static int advk_pcie_rp_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
+ .map = advk_pcie_rp_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
+{
+ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
+ &advk_pcie_rp_irq_domain_ops,
+ pcie);
+ if (!pcie->rp_irq_domain) {
+ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->rp_irq_domain);
+}
+
+static void advk_pcie_handle_pme(struct advk_pcie *pcie)
+{
+ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
+
+ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
+
+ /*
+ * PCIE_MSG_LOG_REG contains the last inbound message, so store
+	 * the requester ID only if PME was not yet asserted.
+	 * Also, do not trigger the PME interrupt while PME is still asserted.
+ */
+ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
+ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
+
+ /*
+		 * Trigger the PME interrupt only if the PMEIE bit in Root Control is set.
+ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
+ */
+ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
+ return;
+
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
+ }
+}
+
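To make the register layout assumed by advk_pcie_handle_pme() concrete: the requester ID sits in the upper 16 bits of PCIE_MSG_LOG_REG, and the emulated Root Status register keeps that ID in its low 16 bits next to the PME status bit (PCI_EXP_RTSTA_PME, bit 16). A standalone sketch with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t msg_log = 0x01a00000;          /* hypothetical PCIE_MSG_LOG_REG */
        uint32_t requester = msg_log >> 16;     /* requester ID of the PME message */
        uint32_t rootsta = requester | (1u << 16); /* PCI_EXP_RTSTA_PME */

        printf("requester=0x%04x rootsta=0x%08x\n", requester, rootsta);
        return 0;
}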
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
u32 msi_val, msi_mask, msi_status, msi_idx;
- u16 msi_data;
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
@@ -1398,13 +1512,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
if (!(BIT(msi_idx) & msi_status))
continue;
- /*
- * msi_idx contains bits [4:0] of the msi_data and msi_data
- * contains 16bit MSI interrupt number
- */
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
- generic_handle_irq(msi_data);
+ if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
}
advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
@@ -1425,6 +1535,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
+	/* Process the PME interrupt first so as not to miss the PME requester ID */
+ if (isr0_status & PCIE_MSG_PM_PME_MASK)
+ advk_pcie_handle_pme(pcie);
+
+ /* Process ERR interrupt */
+ if (isr0_status & PCIE_ISR0_ERR_MASK) {
+ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
+
+ /*
+ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
+ * PCIe interrupt 0
+ */
+ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
+ }
+
/* Process MSI interrupts */
if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
advk_pcie_handle_msi(pcie);
@@ -1437,7 +1563,9 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
PCIE_ISR1_REG);
- generic_handle_domain_irq(pcie->irq_domain, i);
+ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
+ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
+ (char)i + 'A');
}
}
@@ -1458,7 +1586,22 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
+static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct advk_pcie *pcie = dev->bus->sysdata;
+
+ /*
+	 * The emulated root bridge has its own emulated irq chip and irq domain.
+	 * The pin argument is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
+	 * the hwirq for irq_create_mapping() is indexed from zero.
+ */
+ if (pci_is_root_bus(dev->bus))
+ return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
+ else
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
+static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
@@ -1482,9 +1625,7 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)
}
ret = phy_power_on(pcie->phy);
- if (ret == -EOPNOTSUPP) {
- dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
- } else if (ret) {
+ if (ret) {
phy_exit(pcie->phy);
return ret;
}
@@ -1667,11 +1808,21 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
+ ret = advk_pcie_init_rp_irq_domain(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to initialize irq\n");
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
bridge->sysdata = pcie;
bridge->ops = &advk_pcie_ops;
+ bridge->map_irq = advk_pcie_map_irq;
ret = pci_host_probe(bridge);
if (ret < 0) {
+ advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
return ret;
@@ -1720,6 +1871,7 @@ static int advk_pcie_remove(struct platform_device *pdev)
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
/* Remove IRQ domains */
+ advk_pcie_remove_rp_irq_domain(pcie);
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ae0bc2fee4ca..db814f7b93ba 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -92,6 +92,13 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
#define SLOT_NAME_SIZE 11
/*
+ * Size of the requestor for VMbus: the value is based on the observation
+ * that having more than one request outstanding is 'rare', so 64 should
+ * be generous in ensuring that we don't ever run out.
+ */
+#define HV_PCI_RQSTOR_SIZE 64
+
+/*
* Message Types
*/
@@ -604,17 +611,137 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
return cfg->vector;
}
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
- struct msi_desc *msi_desc)
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
{
- msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
- msi_entry->data.as_uint32 = msi_desc->msg.data;
+ int ret = pci_msi_prepare(domain, dev, nvec, info);
+
+ /*
+	 * When the interrupt remapper in the hypervisor IOMMU is used, contiguous
+	 * CPU vectors are not needed for multi-MSI.
+ */
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
+ info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+ return ret;
}
-static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *info)
+/**
+ * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build a new destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data)
{
- return pci_msi_prepare(domain, dev, nvec, info);
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+ struct hv_retarget_device_interrupt *params;
+ struct tran_int_desc *int_desc;
+ struct hv_pcibus_device *hbus;
+ struct cpumask *dest;
+ cpumask_var_t tmp;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u32 var_size = 0;
+ int cpu, nr_bank;
+ u64 res;
+
+ dest = irq_data_get_effective_affinity_mask(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ int_desc = data->chip_data;
+
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+ params = &hbus->retarget_msi_interrupt_params;
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
+ params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
+ params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ (hbus->hdev->dev_instance.b[4] << 16) |
+ (hbus->hdev->dev_instance.b[7] << 8) |
+ (hbus->hdev->dev_instance.b[6] & 0xf8) |
+ PCI_FUNC(pdev->devfn);
+ params->int_target.vector = hv_msi_get_int_vector(data);
+
+ /*
+ * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
+ * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+ * spurious interrupt storm. Not doing so does not seem to have a
+ * negative effect (yet?).
+ */
+
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ /*
+ * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+ * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+ * with >64 VP support.
+ * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+ * is not sufficient for this hypercall.
+ */
+ params->int_target.flags |=
+ HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+ if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+ res = 1;
+ goto exit_unlock;
+ }
+
+ cpumask_and(tmp, dest, cpu_online_mask);
+ nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+ free_cpumask_var(tmp);
+
+ if (nr_bank <= 0) {
+ res = 1;
+ goto exit_unlock;
+ }
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_bank_mask
+ * does).
+ */
+ var_size = 1 + nr_bank;
+ } else {
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ params->int_target.vp_mask |=
+ (1ULL << hv_cpu_number_to_vp_number(cpu));
+ }
+ }
+
+ res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+ params, NULL);
+
+exit_unlock:
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+
+ /*
+ * During hibernation, when a CPU is offlined, the kernel tries
+ * to move the interrupt to the remaining CPUs that haven't
+ * been offlined yet. In this case, the below hv_do_hypercall()
+ * always fails since the vmbus channel has been closed:
+ * refer to cpu_disable_common() -> fixup_irqs() ->
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+ *
+ * Suppress the error message for hibernation because the failure
+ * during hibernation does not matter (at this time all the devices
+ * have been frozen). Note: the correct affinity info is still updated
+ * into the irqdata data structure in migrate_one_irq() ->
+ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
+ * resumes, hv_pci_restore_msi_state() is able to correctly restore
+ * the interrupt with the correct affinity.
+ */
+ if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
}
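The "Device Logical ID" computed above packs four bytes of the VMbus instance GUID around the PCI function number. A standalone sketch of the same byte arithmetic, with made-up GUID bytes and devfn rather than values from a real VM:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t b[16] = { 0 };          /* hypothetical dev_instance GUID bytes */
        uint8_t devfn = 0x03;           /* hypothetical device 0, function 3 */

        b[4] = 0x22; b[5] = 0x11; b[6] = 0x44; b[7] = 0x33;

        uint32_t device_id = (uint32_t)(b[5] << 24) | (b[4] << 16) |
                             (b[7] << 8) | (b[6] & 0xf8) | (devfn & 0x07);

        printf("device_id = 0x%08x\n", device_id);      /* 0x11223343 */
        return 0;
}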
#elif defined(CONFIG_ARM64)
/*
@@ -651,14 +778,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
return irqd->parent_data->hwirq;
}
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
- struct msi_desc *msi_desc)
-{
- msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
- msi_desc->msg.address_lo;
- msi_entry->data = msi_desc->msg.data;
-}
-
/*
* @nr_bm_irqs: Indicates the number of IRQs that were allocated from
* the bitmap.
@@ -839,6 +958,12 @@ static struct irq_domain *hv_pci_get_root_domain(void)
{
return hv_msi_gic_irq_domain;
}
+
+/*
+ * SPIs are used for the interrupts of PCI devices, and SPIs are managed via
+ * GICD registers, which Hyper-V already supports, so no hypercall is needed.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */
/**
@@ -856,11 +981,7 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp,
{
struct hv_pci_compl *comp_pkt = context;
- if (resp_packet_size >= offsetofend(struct pci_response, status))
- comp_pkt->completion_status = resp->status;
- else
- comp_pkt->completion_status = -1;
-
+ comp_pkt->completion_status = resp->status;
complete(&comp_pkt->host_event);
}
@@ -1400,6 +1521,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
+ if (!int_desc->vector_count) {
+ kfree(int_desc);
+ return;
+ }
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
int_pkt->message_type.type =
@@ -1407,7 +1532,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc = *int_desc;
vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
- (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
+ 0, VM_PKT_DATA_INBAND, 0);
kfree(int_desc);
}
@@ -1456,119 +1581,9 @@ static void hv_irq_mask(struct irq_data *data)
irq_chip_mask_parent(data);
}
-/**
- * hv_irq_unmask() - "Unmask" the IRQ by setting its current
- * affinity.
- * @data: Describes the IRQ
- *
- * Build new a destination for the MSI and make a hypercall to
- * update the Interrupt Redirection Table. "Device Logical ID"
- * is built out of this PCI bus's instance GUID and the function
- * number of the device.
- */
static void hv_irq_unmask(struct irq_data *data)
{
- struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
- struct hv_retarget_device_interrupt *params;
- struct hv_pcibus_device *hbus;
- struct cpumask *dest;
- cpumask_var_t tmp;
- struct pci_bus *pbus;
- struct pci_dev *pdev;
- unsigned long flags;
- u32 var_size = 0;
- int cpu, nr_bank;
- u64 res;
-
- dest = irq_data_get_effective_affinity_mask(data);
- pdev = msi_desc_to_pci_dev(msi_desc);
- pbus = pdev->bus;
- hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
-
- spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
-
- params = &hbus->retarget_msi_interrupt_params;
- memset(params, 0, sizeof(*params));
- params->partition_id = HV_PARTITION_ID_SELF;
- params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
- hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
- params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
- (hbus->hdev->dev_instance.b[4] << 16) |
- (hbus->hdev->dev_instance.b[7] << 8) |
- (hbus->hdev->dev_instance.b[6] & 0xf8) |
- PCI_FUNC(pdev->devfn);
- params->int_target.vector = hv_msi_get_int_vector(data);
-
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
- if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
- /*
- * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
- * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
- * with >64 VP support.
- * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
- * is not sufficient for this hypercall.
- */
- params->int_target.flags |=
- HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
-
- if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
- res = 1;
- goto exit_unlock;
- }
-
- cpumask_and(tmp, dest, cpu_online_mask);
- nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
- free_cpumask_var(tmp);
-
- if (nr_bank <= 0) {
- res = 1;
- goto exit_unlock;
- }
-
- /*
- * var-sized hypercall, var-size starts after vp_mask (thus
- * vp_set.format does not count, but vp_set.valid_bank_mask
- * does).
- */
- var_size = 1 + nr_bank;
- } else {
- for_each_cpu_and(cpu, dest, cpu_online_mask) {
- params->int_target.vp_mask |=
- (1ULL << hv_cpu_number_to_vp_number(cpu));
- }
- }
-
- res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
- params, NULL);
-
-exit_unlock:
- spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
-
- /*
- * During hibernation, when a CPU is offlined, the kernel tries
- * to move the interrupt to the remaining CPUs that haven't
- * been offlined yet. In this case, the below hv_do_hypercall()
- * always fails since the vmbus channel has been closed:
- * refer to cpu_disable_common() -> fixup_irqs() ->
- * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
- *
- * Suppress the error message for hibernation because the failure
- * during hibernation does not matter (at this time all the devices
- * have been frozen). Note: the correct affinity info is still updated
- * into the irqdata data structure in migrate_one_irq() ->
- * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
- * resumes, hv_pci_restore_msi_state() is able to correctly restore
- * the interrupt with the correct affinity.
- */
- if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
- dev_err(&hbus->hdev->device,
- "%s() failed: %#llx", __func__, res);
+ hv_arch_irq_unmask(data);
if (data->parent_data->chip->irq_unmask)
irq_chip_unmask_parent(data);
@@ -1587,19 +1602,24 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
struct pci_create_int_response *int_resp =
(struct pci_create_int_response *)resp;
+ if (resp_packet_size < sizeof(*int_resp)) {
+ comp_pkt->comp_pkt.completion_status = -1;
+ goto out;
+ }
comp_pkt->comp_pkt.completion_status = resp->status;
comp_pkt->int_desc = int_resp->int_desc;
+out:
complete(&comp_pkt->comp_pkt.host_event);
}
static u32 hv_compose_msi_req_v1(
struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/*
@@ -1622,14 +1642,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
static u32 hv_compose_msi_req_v2(
struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
- u32 slot, u8 vector)
+ u32 slot, u8 vector, u8 vector_count)
{
int cpu;
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
@@ -1641,7 +1661,7 @@ static u32 hv_compose_msi_req_v2(
static u32 hv_compose_msi_req_v3(
struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
- u32 slot, u32 vector)
+ u32 slot, u32 vector, u8 vector_count)
{
int cpu;
@@ -1649,7 +1669,7 @@ static u32 hv_compose_msi_req_v3(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
- int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.vector_count = vector_count;
int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
@@ -1680,6 +1700,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
+ struct msi_desc *msi_desc;
+ u8 vector, vector_count;
struct {
struct pci_packet pci_pkt;
union {
@@ -1688,11 +1710,21 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_create_interrupt3 v3;
} int_pkts;
} __packed ctxt;
-
+ u64 trans_id;
u32 size;
int ret;
- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ /* Reuse the previous allocation */
+ if (data->chip_data) {
+ int_desc = data->chip_data;
+ msg->address_hi = int_desc->address >> 32;
+ msg->address_lo = int_desc->address & 0xffffffff;
+ msg->data = int_desc->data;
+ return;
+ }
+
+ msi_desc = irq_data_get_msi_desc(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1701,17 +1733,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (!hpdev)
goto return_null_message;
- /* Free any previous message that might have already been composed. */
- if (data->chip_data) {
- int_desc = data->chip_data;
- data->chip_data = NULL;
- hv_int_desc_free(hpdev, int_desc);
- }
-
int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
+ if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+ /*
+ * If this is not the first MSI of Multi MSI, we already have
+ * a mapping. Can exit early.
+ */
+ if (msi_desc->irq != data->irq) {
+ data->chip_data = int_desc;
+ int_desc->address = msi_desc->msg.address_lo |
+ (u64)msi_desc->msg.address_hi << 32;
+ int_desc->data = msi_desc->msg.data +
+ (data->irq - msi_desc->irq);
+ msg->address_hi = msi_desc->msg.address_hi;
+ msg->address_lo = msi_desc->msg.address_lo;
+ msg->data = int_desc->data;
+ put_pcichild(hpdev);
+ return;
+ }
+ /*
+		 * The vector we select here is a dummy value: the correct
+		 * value is sent to the hypervisor in unmask(). The dummy must
+		 * be aligned to the count and must be non-zero. Multi-MSI
+		 * uses power-of-2 counts up to 32, so 32 always works here.
+ */
+ vector = 32;
+ vector_count = msi_desc->nvec_used;
+ } else {
+ vector = hv_msi_get_int_vector(data);
+ vector_count = 1;
+ }
+
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
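The dummy-vector rule described in the hunk above (non-zero and aligned to the vector count) can be checked in isolation; a standalone sketch, assuming power-of-2 counts up to 32 as the comment states:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int vector = 32;       /* the placeholder used by the driver */
        unsigned int count;

        for (count = 1; count <= 32; count *= 2) {
                assert(vector != 0 && vector % count == 0);
                printf("count=%2u: vector 32 is suitably aligned\n", count);
        }
        return 0;
}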
@@ -1722,7 +1777,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_2:
@@ -1730,14 +1786,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
dest,
hpdev->desc.win_slot.slot,
- hv_msi_get_int_vector(data));
+ vector,
+ vector_count);
break;
default:
@@ -1750,10 +1808,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
goto free_int_desc;
}
- ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
- size, (unsigned long)&ctxt.pci_pkt,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+ size, (unsigned long)&ctxt.pci_pkt,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
dev_err(&hbus->hdev->device,
"Sending request for interrupt failed: 0x%x",
@@ -1832,6 +1890,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
enable_tasklet:
tasklet_enable(&channel->callback_event);
+ /*
+ * The completion packet on the stack becomes invalid after 'return';
+ * remove the ID from the VMbus requestor if the identifier is still
+ * mapped to/associated with the packet. (The identifier could have
+ * been 're-used', i.e., already removed and (re-)mapped.)
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
free_int_desc:
kfree(int_desc);
drop_reference:
@@ -2079,12 +2146,17 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
}
}
if (high_size <= 1 && low_size <= 1) {
- /* Set the memory enable bit. */
- _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
- &command);
- command |= PCI_COMMAND_MEMORY;
- _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
- command);
+ /*
+			 * No need to set the PCI_COMMAND_MEMORY bit, as
+			 * the core PCI driver doesn't require the bit
+			 * to be pre-set. In fact, we intentionally
+			 * keep the bit off so that BAR probing in the
+			 * core PCI driver doesn't cause Hyper-V to
+			 * unnecessarily unmap and remap the virtual
+			 * BARs from/to the physical BARs multiple
+			 * times. This reduces VM boot time significantly
+			 * when the BAR sizes are huge.
+ */
break;
}
}
@@ -2220,12 +2292,14 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
struct q_res_req_compl *completion = context;
struct pci_q_res_req_response *q_res_req =
(struct pci_q_res_req_response *)resp;
+ s32 status;
int i;
- if (resp->status < 0) {
+ status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
+ if (status < 0) {
dev_err(&completion->hpdev->hbus->hdev->device,
"query resource requirements failed: %x\n",
- resp->status);
+ status);
} else {
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
@@ -2649,7 +2723,7 @@ static void hv_eject_device_work(struct work_struct *work)
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
- sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+ sizeof(*ejct_pkt), 0,
VM_PKT_DATA_INBAND, 0);
/* For the get_pcichild() in hv_pci_eject_device() */
@@ -2696,8 +2770,9 @@ static void hv_pci_onchannelcallback(void *context)
const int packet_size = 0x100;
int ret;
struct hv_pcibus_device *hbus = context;
+ struct vmbus_channel *chan = hbus->hdev->channel;
u32 bytes_recvd;
- u64 req_id;
+ u64 req_id, req_addr;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = packet_size;
@@ -2709,14 +2784,15 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
+ unsigned long flags;
buffer = kmalloc(bufferlen, GFP_ATOMIC);
if (!buffer)
return;
while (1) {
- ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
- bufferlen, &bytes_recvd, &req_id);
+ ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
+ &bytes_recvd, &req_id);
if (ret == -ENOBUFS) {
kfree(buffer);
@@ -2743,15 +2819,29 @@ static void hv_pci_onchannelcallback(void *context)
switch (desc->type) {
case VM_PKT_COMP:
+ lock_requestor(chan, flags);
+ req_addr = __vmbus_request_addr_match(chan, req_id,
+ VMBUS_RQST_ADDR_ANY);
+ if (req_addr == VMBUS_RQST_ERROR) {
+ unlock_requestor(chan, flags);
+ dev_err(&hbus->hdev->device,
+ "Invalid transaction ID %llx\n",
+ req_id);
+ break;
+ }
+ comp_packet = (struct pci_packet *)req_addr;
+ response = (struct pci_response *)buffer;
/*
- * The host is trusted, and thus it's safe to interpret
- * this transaction ID as a pointer.
+ * Call ->completion_func() within the critical section to make
+ * sure that the packet pointer is still valid during the call:
+ * here 'valid' means that there's a task still waiting for the
+ * completion, and that the packet data is still on the waiting
+ * task's stack. Cf. hv_compose_msi_msg().
*/
- comp_packet = (struct pci_packet *)req_id;
- response = (struct pci_response *)buffer;
comp_packet->completion_func(comp_packet->compl_ctxt,
response,
bytes_recvd);
+ unlock_requestor(chan, flags);
break;
case VM_PKT_DATA_INBAND:
@@ -2761,7 +2851,8 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_BUS_RELATIONS:
bus_rel = (struct pci_bus_relations *)buffer;
- if (bytes_recvd <
+ if (bytes_recvd < sizeof(*bus_rel) ||
+ bytes_recvd <
struct_size(bus_rel, func,
bus_rel->device_count)) {
dev_err(&hbus->hdev->device,
@@ -2775,7 +2866,8 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_BUS_RELATIONS2:
bus_rel2 = (struct pci_bus_relations2 *)buffer;
- if (bytes_recvd <
+ if (bytes_recvd < sizeof(*bus_rel2) ||
+ bytes_recvd <
struct_size(bus_rel2, func,
bus_rel2->device_count)) {
dev_err(&hbus->hdev->device,
@@ -2789,6 +2881,11 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_EJECT:
dev_message = (struct pci_dev_incoming *)buffer;
+ if (bytes_recvd < sizeof(*dev_message)) {
+ dev_err(&hbus->hdev->device,
+ "eject message too small\n");
+ break;
+ }
hpdev = get_pcichild_wslot(hbus,
dev_message->wslot.slot);
if (hpdev) {
@@ -2800,6 +2897,11 @@ static void hv_pci_onchannelcallback(void *context)
case PCI_INVALIDATE_BLOCK:
inval = (struct pci_dev_inval_block *)buffer;
+ if (bytes_recvd < sizeof(*inval)) {
+ dev_err(&hbus->hdev->device,
+ "invalidate message too small\n");
+ break;
+ }
hpdev = get_pcichild_wslot(hbus,
inval->wslot.slot);
if (hpdev) {
@@ -3404,6 +3506,15 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+ /*
+ * Set the PCI bus parent to be the corresponding VMbus
+ * device. Then the VMbus device will be assigned as the
+ * ACPI companion in pcibios_root_bridge_prepare() and
+ * pci_dma_configure() will propagate device coherence
+ * information to devices created on the bus.
+ */
+ hbus->sysdata.parent = hdev->device.parent;
#endif
hbus->hdev = hdev;
@@ -3419,6 +3530,10 @@ static int hv_pci_probe(struct hv_device *hdev,
goto free_dom;
}
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
hv_pci_onchannelcallback, hbus);
if (ret)
@@ -3549,6 +3664,7 @@ free_bus:
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct vmbus_channel *chan = hdev->channel;
struct {
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
@@ -3556,13 +3672,14 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
+ u64 trans_id;
int ret;
/*
* After the host sends the RESCIND_CHANNEL message, it doesn't
* access the per-channel ringbuffer any longer.
*/
- if (hdev->channel->rescind)
+ if (chan->rescind)
return 0;
if (!keep_devs) {
@@ -3599,16 +3716,26 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
pkt.teardown_packet.compl_ctxt = &comp_pkt;
pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
- (unsigned long)&pkt.teardown_packet,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
+ sizeof(struct pci_message),
+ (unsigned long)&pkt.teardown_packet,
+ &trans_id, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
return ret;
- if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
+ /*
+ * The completion packet on the stack becomes invalid after
+ * 'return'; remove the ID from the VMbus requestor if the
+ * identifier is still mapped to/associated with the packet.
+ *
+ * Cf. hv_pci_onchannelcallback().
+ */
+ vmbus_request_addr_match(chan, trans_id,
+ (unsigned long)&pkt.teardown_packet);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -3749,6 +3876,10 @@ static int hv_pci_resume(struct hv_device *hdev)
hbus->state = hv_pcibus_init;
+ hdev->channel->next_request_id_callback = vmbus_next_request_id;
+ hdev->channel->request_addr_callback = vmbus_request_addr;
+ hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;
+
ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
hv_pci_onchannelcallback, hbus);
if (ret)
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 48169b1e3817..50a8e1d6f70a 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -35,7 +35,7 @@ struct loongson_pci {
/* Fixup wrong class code in PCIe bridges */
static void bridge_class_quirk(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_PCIE_PORT_0, bridge_class_quirk);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index f8e82c5e2d87..c1ffdb06c971 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
@@ -32,8 +33,9 @@
#define PCIE_DEV_REV_OFF 0x0008
#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_SSDEV_ID_OFF 0x002c
#define PCIE_CAP_PCIEXP 0x0060
-#define PCIE_HEADER_LOG_4_OFF 0x0128
+#define PCIE_CAP_PCIERR_OFF 0x0100
#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
@@ -53,9 +55,10 @@
PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF 0x18fc
#define PCIE_INT_CAUSE_OFF 0x1900
+#define PCIE_INT_UNMASK_OFF 0x1910
+#define PCIE_INT_INTX(i) BIT(24+i)
#define PCIE_INT_PM_PME BIT(28)
-#define PCIE_MASK_OFF 0x1910
-#define PCIE_MASK_ENABLE_INTS 0x0f000000
+#define PCIE_INT_ALL_MASK GENMASK(31, 0)
#define PCIE_CTRL_OFF 0x1a00
#define PCIE_CTRL_X1_MODE 0x0001
#define PCIE_CTRL_RC_MODE BIT(1)
@@ -64,6 +67,12 @@
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_SSPL_OFF 0x1a0c
+#define PCIE_SSPL_VALUE_SHIFT 0
+#define PCIE_SSPL_VALUE_MASK GENMASK(7, 0)
+#define PCIE_SSPL_SCALE_SHIFT 8
+#define PCIE_SSPL_SCALE_MASK GENMASK(9, 8)
+#define PCIE_SSPL_ENABLE BIT(16)
#define PCIE_RC_RTSTA 0x1a14
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
@@ -93,6 +102,7 @@ struct mvebu_pcie_port {
void __iomem *base;
u32 port;
u32 lane;
+ bool is_x4;
int devfn;
unsigned int mem_target;
unsigned int mem_attr;
@@ -108,6 +118,11 @@ struct mvebu_pcie_port {
struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
struct resource regs;
+ u8 slot_power_limit_value;
+ u8 slot_power_limit_scale;
+ struct irq_domain *intx_irq_domain;
+ raw_spinlock_t irq_lock;
+ int intx_irq;
};
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
@@ -233,13 +248,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u32 ctrl, cmd, dev_rev, mask;
+ u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;
/* Setup PCIe controller to Root Complex mode. */
ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
ctrl |= PCIE_CTRL_RC_MODE;
mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
+ /*
+	 * Set the Maximum Link Width to x1 or x4 in the Root Port's PCIe Link
+	 * Capability register. The PCIe specification defines this register as
+	 * read-only, but this mvebu controller has it as read-write, and it must
+	 * be set to the number of SerDes PCIe lanes (1 or 4). If this register
+	 * is not set correctly, the link with the endpoint card is not established.
+ */
+ lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= (port->is_x4 ? 4 : 1) << 4;
+ mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
+
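The lane-count write above targets the Maximum Link Width field, which occupies bits 9:4 of the Link Capabilities register (hence the shift by 4). A standalone sketch of the field update, with a made-up starting register value:

#include <stdint.h>
#include <stdio.h>

#define LNKCAP_MLW      (0x3fu << 4)    /* PCI_EXP_LNKCAP_MLW, bits 9:4 */

int main(void)
{
        uint32_t lnkcap = 0x0047dc12;   /* hypothetical register contents */
        unsigned int lanes = 4;         /* x4 port */

        lnkcap &= ~LNKCAP_MLW;
        lnkcap |= lanes << 4;

        printf("Maximum Link Width is now x%u\n", (lnkcap >> 4) & 0x3f);
        return 0;
}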
/* Disable Root Bridge I/O space, memory space and bus mastering. */
cmd = mvebu_readl(port, PCIE_CMD_OFF);
cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
@@ -268,23 +295,71 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
*/
dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
dev_rev &= ~0xffffff00;
- dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
/* Point PCIe unit MBUS decode windows to DRAM space. */
mvebu_pcie_setup_wins(port);
- /* Enable interrupt lines A-D. */
- mask = mvebu_readl(port, PCIE_MASK_OFF);
- mask |= PCIE_MASK_ENABLE_INTS;
- mvebu_writel(port, mask, PCIE_MASK_OFF);
+ /*
+	 * Program the Root Port to automatically send the Set_Slot_Power_Limit
+	 * PCIe Message when the link changes from DL_Down to DL_Up, provided a
+	 * valid slot power limit was specified.
+ */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ if (port->slot_power_limit_value) {
+ sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
+ sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
+ sspl |= PCIE_SSPL_ENABLE;
+ }
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
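The PCIE_SSPL_OFF programming above follows the *_SHIFT/*_MASK definitions earlier in the file: an 8-bit power value, a 2-bit scale code, and an enable bit. A standalone sketch assuming a hypothetical 25 W limit (value 250 with scale code 1, i.e. 0.1 W units):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t value = 250;    /* hypothetical: 250 * 0.1 W = 25 W */
        uint8_t scale = 1;      /* hypothetical: scale code 1 = 0.1 W units */

        uint32_t sspl = ((uint32_t)value << 0) |        /* PCIE_SSPL_VALUE */
                        ((uint32_t)scale << 8) |        /* PCIE_SSPL_SCALE */
                        (1u << 16);                     /* PCIE_SSPL_ENABLE */

        printf("sspl = 0x%08x\n", sspl);        /* 0x000101fa */
        return 0;
}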
+ /* Mask all interrupt sources. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
+
+ /* Clear all interrupt causes. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
+
+ /* Check if "intx" interrupt was specified in DT. */
+ if (port->intx_irq > 0)
+ return;
+
+ /*
+	 * Fallback code for when the "intx" interrupt was not specified in DT:
+	 * unmask all legacy INTx interrupts, as the driver does not provide a
+	 * way to mask and unmask individual legacy INTx interrupts. Legacy INTx
+	 * are reported via one shared GIC source, and therefore the kernel
+	 * cannot distinguish which individual legacy INTx was triggered. These
+	 * interrupts are shared, so this should not cause any issue, just a
+	 * performance penalty, as every PCIe interrupt handler needs to be
+	 * called when some interrupt is triggered.
+ */
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
+ PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
-static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
- struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn);
+
+static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
{
- void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+ void __iomem *conf_data;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ conf_data = port->base + PCIE_CONF_DATA_OFF;
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
@@ -300,18 +375,27 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
*val = readl_relaxed(conf_data);
break;
default:
- *val = 0xffffffff;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
}
-static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
- struct pci_bus *bus,
- u32 devfn, int where, int size, u32 val)
+static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
{
- void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+ struct mvebu_pcie *pcie = bus->sysdata;
+ struct mvebu_pcie_port *port;
+ void __iomem *conf_data;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ conf_data = port->base + PCIE_CONF_DATA_OFF;
mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
PCIE_CONF_ADDR_OFF);
@@ -333,6 +417,11 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops mvebu_pcie_child_ops = {
+ .read = mvebu_pcie_child_rd_conf,
+ .write = mvebu_pcie_child_wr_conf,
+};
+
/*
* Remove windows, starting from the largest ones to the smallest
* ones.
@@ -438,12 +527,6 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
- if (!mvebu_has_ioport(port)) {
- dev_WARN(&port->pcie->pdev->dev,
- "Attempt to set IO when IO is disabled\n");
- return -EOPNOTSUPP;
- }
-
/*
* We read the PCI-to-PCI bridge emulated registers, and
* calculate the base address and size of the address decoding
@@ -552,20 +635,40 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_LNKCAP:
/*
- * PCIe requires the clock power management capability to be
- * hard-wired to zero for downstream ports
+		 * PCIe requires that the Clock Power Management capability bit
+		 * is hard-wired to zero for downstream ports, but the HW returns 1.
+		 * Additionally, set the Data Link Layer Link Active Reporting
+		 * Capable bit, as a DL_Active indication is provided too.
*/
- *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
- ~PCI_EXP_LNKCAP_CLKPM;
+ *value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
+ ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
break;
case PCI_EXP_LNKCTL:
- *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ /* DL_Active indication is provided via PCIE_STAT_OFF */
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
+ (mvebu_pcie_link_up(port) ?
+ (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
break;
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
+ case PCI_EXP_SLTCTL: {
+ u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
+ u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
+ u32 val = 0;
+ /*
+		 * When the slot power limit was not specified in DT, the
+		 * ASPL_DISABLE bit is stored only in the emulated config space.
+		 * Otherwise, reflect the status of the PCIE_SSPL_ENABLE bit in HW.
+ */
+ if (!port->slot_power_limit_value)
+ val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
+ else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
+ val |= PCI_EXP_SLTCTL_ASPL_DISABLE;
+		/* This callback is 32-bit; the high 16 bits hold the Slot Status. */
+ val |= slotsta << 16;
+ *value = val;
break;
+ }
case PCI_EXP_RTSTA:
*value = mvebu_readl(port, PCIE_RC_RTSTA);
@@ -590,6 +693,37 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case 0:
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG+0:
+ case PCI_ERR_HEADER_LOG+4:
+ case PCI_ERR_HEADER_LOG+8:
+ case PCI_ERR_HEADER_LOG+12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
+ break;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
+}
+
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
int reg, u32 old, u32 new, u32 mask)
@@ -599,24 +733,18 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_COMMAND:
- if (!mvebu_has_ioport(port)) {
- conf->command = cpu_to_le16(
- le16_to_cpu(conf->command) & ~PCI_COMMAND_IO);
- new &= ~PCI_COMMAND_IO;
- }
-
mvebu_writel(port, new, PCIE_CMD_OFF);
break;
case PCI_IO_BASE:
- if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) {
+ if ((mask & 0xffff) && mvebu_has_ioport(port) &&
+ mvebu_pcie_handle_iobase_change(port)) {
/* On error disable IO range */
conf->iobase &= ~0xf0;
conf->iolimit &= ~0xf0;
+ conf->iobase |= 0xf0;
conf->iobaseupper = cpu_to_le16(0x0000);
conf->iolimitupper = cpu_to_le16(0x0000);
- if (mvebu_has_ioport(port))
- conf->iobase |= 0xf0;
}
break;
@@ -630,14 +758,14 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_IO_BASE_UPPER16:
- if (mvebu_pcie_handle_iobase_change(port)) {
+ if (mvebu_has_ioport(port) &&
+ mvebu_pcie_handle_iobase_change(port)) {
/* On error disable IO range */
conf->iobase &= ~0xf0;
conf->iolimit &= ~0xf0;
+ conf->iobase |= 0xf0;
conf->iobaseupper = cpu_to_le16(0x0000);
conf->iolimitupper = cpu_to_le16(0x0000);
- if (mvebu_has_ioport(port))
- conf->iobase |= 0xf0;
}
break;
@@ -675,16 +803,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_LNKCTL:
/*
- * If we don't support CLKREQ, we must ensure that the
- * CLKREQ enable bit always reads zero. Since we haven't
- * had this capability, and it's dependent on board wiring,
- * disable it for the time being.
+ * PCIe requires that the Enable Clock Power Management bit
+		 * is hard-wired to zero for downstream ports, but the HW
+		 * allows changing it.
*/
new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
+ case PCI_EXP_SLTCTL:
+ /*
+		 * Allow changing the PCIE_SSPL_ENABLE bit only when a slot power
+		 * limit was specified in DT and configured in HW.
+ */
+ if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
+ port->slot_power_limit_value) {
+ u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
+ sspl &= ~PCIE_SSPL_ENABLE;
+ else
+ sspl |= PCIE_SSPL_ENABLE;
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+ }
+ break;
+
case PCI_EXP_RTSTA:
/*
* PME Status bit in Root Status Register (PCIE_RC_RTSTA)
@@ -709,11 +852,45 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+static void
+mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG+0:
+ case PCI_ERR_HEADER_LOG+4:
+ case PCI_ERR_HEADER_LOG+8:
+ case PCI_ERR_HEADER_LOG+12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
.read_base = mvebu_pci_bridge_emul_base_conf_read,
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+ .read_ext = mvebu_pci_bridge_emul_ext_conf_read,
+ .write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
/*
@@ -722,32 +899,57 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
*/
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
+ unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
struct pci_bridge_emul *bridge = &port->bridge;
+ u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
+ u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
+ u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
- bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
- bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
- bridge->conf.class_revision =
- mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+ bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
+ bridge->conf.device = cpu_to_le16(dev_id >> 16);
+ bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);
if (mvebu_has_ioport(port)) {
/* We support 32 bits I/O addressing */
bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+ } else {
+ bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
}
/*
* Older mvebu hardware provides PCIe Capability structure only in
* version 1. New hardware provides it in version 2.
+	 * Also enable slot support, which is emulated.
*/
- bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);
+ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);
+ /*
+ * Set Presence Detect State bit permanently as there is no support for
+ * unplugging PCIe card from the slot. Assume that PCIe card is always
+ * connected in slot.
+ *
+ * Set physical slot number to port+1 as mvebu ports are indexed from
+ * zero and zero value is reserved for ports within the same silicon
+ * as Root Port which is not mvebu case.
+ *
+ * Also set correct slot power limit.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
+ FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
+ FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
+
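The slotcap composition above relies on FIELD_PREP() shifting each value into place according to its field mask. A standalone approximation using a GCC-style count-trailing-zeros builtin and the PCI_EXP_SLTCAP_* mask values (hypothetical inputs, not driver state):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's FIELD_PREP(). */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
        return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
        uint32_t slotcap;

        slotcap = field_prep(0x00007f80, 250) | /* PCI_EXP_SLTCAP_SPLV */
                  field_prep(0x00018000, 1) |   /* PCI_EXP_SLTCAP_SPLS */
                  field_prep(0xfff80000, 1);    /* PCI_EXP_SLTCAP_PSN: port 0 + 1 */

        printf("slotcap = 0x%08x\n", slotcap);  /* 0x0008fd00 */
        return 0;
}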
+ bridge->subsystem_vendor_id = ssdev_id & 0xffff;
+ bridge->subsystem_id = ssdev_id >> 16;
bridge->has_pcie = true;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
- return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
+ return pci_bridge_emul_init(bridge, bridge_flags);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -784,25 +986,12 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
- int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- /* Access the emulated PCI-to-PCI bridge */
- if (bus->number == 0)
- return pci_bridge_emul_conf_write(&port->bridge, where,
- size, val);
-
- if (!mvebu_pcie_link_up(port))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Access the real PCIe interface */
- ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
- where, size, val);
-
- return ret;
+ return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
}
/* PCI configuration space read function */
@@ -811,25 +1000,12 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
- int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- /* Access the emulated PCI-to-PCI bridge */
- if (bus->number == 0)
- return pci_bridge_emul_conf_read(&port->bridge, where,
- size, val);
-
- if (!mvebu_pcie_link_up(port))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Access the real PCIe interface */
- ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
- where, size, val);
-
- return ret;
+ return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
}
static struct pci_ops mvebu_pcie_ops = {
@@ -837,6 +1013,108 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
+static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
+{
+ struct mvebu_pcie_port *port = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 unmask;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask &= ~PCIE_INT_INTX(hwirq);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
+{
+ struct mvebu_pcie_port *port = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 unmask;
+
+ raw_spin_lock_irqsave(&port->irq_lock, flags);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ unmask |= PCIE_INT_INTX(hwirq);
+ mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
+ raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+ .name = "mvebu-INTx",
+ .irq_mask = mvebu_pcie_intx_irq_mask,
+ .irq_unmask = mvebu_pcie_intx_irq_unmask,
+};
+
+static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct mvebu_pcie_port *port = h->host_data;
+
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, port);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
+ .map = mvebu_pcie_intx_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
+{
+ struct device *dev = &port->pcie->pdev->dev;
+ struct device_node *pcie_intc_node;
+
+ raw_spin_lock_init(&port->irq_lock);
+
+ pcie_intc_node = of_get_next_child(port->dn, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
+ return -ENODEV;
+ }
+
+ port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops,
+ port);
+ of_node_put(pcie_intc_node);
+ if (!port->intx_irq_domain) {
+ dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mvebu_pcie_irq_handler(struct irq_desc *desc)
+{
+ struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = &port->pcie->pdev->dev;
+ u32 cause, unmask, status;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
+ unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
+ status = cause & unmask;
+
+ /* Process legacy INTx interrupts */
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (!(status & PCIE_INT_INTX(i)))
+ continue;
+
+ if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
+ dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
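The chained handler above dispatches only sources that are both asserted in the cause register and enabled in the unmask register, with INTA..INTD at bits 24..27 (PCIE_INT_INTX(i) is BIT(24+i)). A standalone sketch of that dispatch walk with made-up register values:

#include <stdio.h>

int main(void)
{
        unsigned int cause  = (1u << 24) | (1u << 26);  /* hypothetical: INTA+INTC */
        unsigned int unmask = 0x0f000000;               /* all four INTx unmasked */
        unsigned int status = cause & unmask;
        int i;

        for (i = 0; i < 4; i++)
                if (status & (1u << (24 + i)))
                        printf("dispatch INT%c\n", 'A' + i);
        return 0;
}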
static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/* Interrupt support on mvebu emulated bridges is not implemented yet */
@@ -985,7 +1263,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
{
struct device *dev = &pcie->pdev->dev;
enum of_gpio_flags flags;
+ u32 slot_power_limit;
int reset_gpio, ret;
+ u32 num_lanes;
port->pcie = pcie;
@@ -998,6 +1278,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
port->lane = 0;
+ if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
+ port->is_x4 = true;
+
port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
port->lane);
if (!port->name) {
@@ -1030,6 +1313,21 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->io_attr = -1;
}
+ /*
+	 * Old DT bindings do not contain the "intx" interrupt,
+	 * so do not fail driver probing when the interrupt does not exist.
+ */
+ port->intx_irq = of_irq_get_byname(child, "intx");
+ if (port->intx_irq == -EPROBE_DEFER) {
+ ret = port->intx_irq;
+ goto err;
+ }
+ if (port->intx_irq <= 0) {
+ dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually; "
+ "%pOF does not contain an intx interrupt\n",
+ port->name, child);
+ }
+
reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
if (reset_gpio == -EPROBE_DEFER) {
ret = reset_gpio;
@@ -1066,6 +1364,15 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->reset_gpio = gpio_to_desc(reset_gpio);
}
+ slot_power_limit = of_pci_get_slot_power_limit(child,
+ &port->slot_power_limit_value,
+ &port->slot_power_limit_scale);
+ if (slot_power_limit)
+ dev_info(dev, "%s: Slot power limit %u.%uW\n",
+ port->name,
+ slot_power_limit / 1000,
+ (slot_power_limit / 100) % 10);
+
port->clk = of_clk_get_by_name(child, NULL);
if (IS_ERR(port->clk)) {
dev_err(dev, "%s: cannot get clock\n", port->name);
@@ -1226,6 +1533,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ int irq = port->intx_irq;
child = port->dn;
if (!child)
@@ -1253,6 +1561,22 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ if (irq > 0) {
+ ret = mvebu_pcie_init_irq_domain(port);
+ if (ret) {
+ dev_err(dev, "%s: cannot init irq domain\n",
+ port->name);
+ pci_bridge_emul_cleanup(&port->bridge);
+ devm_iounmap(dev, port->base);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+ irq_set_chained_handler_and_data(irq,
+ mvebu_pcie_irq_handler,
+ port);
+ }
+
/*
* PCIe topology exported by mvebu hw is quite complicated. In
* reality it has something like N fully independent host bridges
@@ -1333,10 +1657,9 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_set_local_bus_nr(port, 0);
}
- pcie->nports = i;
-
bridge->sysdata = pcie;
bridge->ops = &mvebu_pcie_ops;
+ bridge->child_ops = &mvebu_pcie_child_ops;
bridge->align_resource = mvebu_pcie_align_resource;
bridge->map_irq = mvebu_pcie_map_irq;
@@ -1347,7 +1670,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
{
struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- u32 cmd;
+ u32 cmd, sspl;
int i;
/* Remove PCI bus with all devices. */
@@ -1358,6 +1681,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ int irq = port->intx_irq;
if (!port->base)
continue;
@@ -1368,11 +1692,26 @@ static int mvebu_pcie_remove(struct platform_device *pdev)
mvebu_writel(port, cmd, PCIE_CMD_OFF);
/* Mask all interrupt sources. */
- mvebu_writel(port, 0, PCIE_MASK_OFF);
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);
+
+ /* Clear all interrupt causes. */
+ mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);
+
+ if (irq > 0)
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+
+ /* Remove IRQ domains. */
+ if (port->intx_irq_domain)
+ irq_domain_remove(port->intx_irq_domain);
/* Free config space for emulated root bridge. */
pci_bridge_emul_cleanup(&port->bridge);
+ /* Disable sending Set_Slot_Power_Limit PCIe Message. */
+ sspl = mvebu_readl(port, PCIE_SSPL_OFF);
+ sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
+ mvebu_writel(port, sspl, PCIE_SSPL_OFF);
+
/* Disable and clear BARs and windows. */
mvebu_pcie_disable_wins(port);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index cb0aa65d6934..0457ec02ab70 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -726,7 +726,7 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 653d5d0ecf81..7991d334e0f1 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -31,10 +31,9 @@ static u32 pci_slot_ignore;
static int __init versatile_pci_slot_ignore(char *str)
{
- int retval;
int slot;
- while ((retval = get_option(&str, &slot))) {
+ while (get_option(&str, &slot)) {
if ((slot < 0) || (slot > 31))
pr_err("Illegal slot value: %d\n", slot);
else
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 0d5acbfc7143..eb6240958bb0 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -49,7 +49,6 @@
#define EN_REG 0x00000001
#define OB_LO_IO 0x00000002
#define XGENE_PCIE_DEVICEID 0xE004
-#define SZ_1T (SZ_1G*1024ULL)
#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
#define XGENE_V1_PCI_EXP_CAP 0x40
@@ -465,7 +464,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
return 1;
}
- if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) {
+ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
*ib_reg_mask |= (1 << 0);
return 0;
}
@@ -479,28 +478,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
- struct resource_entry *entry,
- u8 *ib_reg_mask)
+ struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void __iomem *bar_addr;
u32 pim_reg;
- u64 cpu_addr = entry->res->start;
- u64 pci_addr = cpu_addr - entry->offset;
- u64 size = resource_size(entry->res);
+ u64 cpu_addr = range->cpu_addr;
+ u64 pci_addr = range->pci_addr;
+ u64 size = range->size;
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
- region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
- if (entry->res->flags & IORESOURCE_PREFETCH)
+ if (range->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@@ -531,13 +529,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
- struct resource_entry *entry;
+ struct device_node *np = port->node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = port->dev;
u8 ib_reg_mask = 0;
- resource_list_for_each_entry(entry, &bridge->dma_ranges)
- xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+ }
return 0;
}
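The inbound-window math in xgene_pcie_setup_ib_reg() assumes the range size is a power of two; a worked example with assumed values:

	u64 size = SZ_1G;			/* assumed 1 GiB dma-range */
	u64 mask = ~(size - 1) | EN_REG;	/* EN_REG == 0x00000001 */
	/* mask == 0xffffffffc0000001: the upper bits select the window,
	 * bit 0 keeps the region enabled. */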
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 854d95163112..a2c3c207a04b 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq < 0)
return -ENOSPC;
- fwspec.param[1] += hwirq;
+ fwspec.param[fwspec.param_count - 2] += hwirq;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
if (ret)
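The apple fix indexes the fwspec from the end because the interrupt number is always the second-to-last cell, however many cells the parent uses. Illustration (cell layouts assumed for the example):

	/*
	 * 2-cell parent: { number, flags }       : number at param[0] == count - 2
	 * 3-cell parent: { type, number, flags } : number at param[1] == count - 2
	 *
	 * fwspec.param[fwspec.param_count - 2] += hwirq is correct for both,
	 * while the old hard-coded param[1] only matched the 3-cell layout.
	 */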
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 375c0c40bbf8..e61058e13818 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
-#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -196,8 +195,6 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
-static int brcm_pcie_linkup(struct brcm_pcie *pcie);
-static int brcm_pcie_add_bus(struct pci_bus *bus);
enum {
RGR1_SW_INIT_1,
@@ -286,14 +283,6 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
-struct subdev_regulators {
- unsigned int num_supplies;
- struct regulator_bulk_data supplies[];
-};
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus);
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus);
-
struct brcm_msi {
struct device *dev;
void __iomem *base;
@@ -331,9 +320,6 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
- bool refusal_mode;
- struct subdev_regulators *sr;
- bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
@@ -450,99 +436,6 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
return ssc && pll ? 0 : -EIO;
}
-static void *alloc_subdev_regulators(struct device *dev)
-{
- static const char * const supplies[] = {
- "vpcie3v3",
- "vpcie3v3aux",
- "vpcie12v",
- };
- const size_t size = sizeof(struct subdev_regulators)
- + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
- struct subdev_regulators *sr;
- int i;
-
- sr = devm_kzalloc(dev, size, GFP_KERNEL);
- if (sr) {
- sr->num_supplies = ARRAY_SIZE(supplies);
- for (i = 0; i < ARRAY_SIZE(supplies); i++)
- sr->supplies[i].supply = supplies[i];
- }
-
- return sr;
-}
-
-static int pci_subdev_regulators_add_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct subdev_regulators *sr;
- int ret;
-
- if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
- return 0;
-
- if (dev->driver_data)
- dev_err(dev, "dev.driver_data unexpectedly non-NULL\n");
-
- sr = alloc_subdev_regulators(dev);
- if (!sr)
- return -ENOMEM;
-
- dev->driver_data = sr;
- ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
- if (ret)
- return ret;
-
- ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
- if (ret) {
- dev_err(dev, "failed to enable regulators for downstream device\n");
- return ret;
- }
-
- return 0;
-}
-
-static int brcm_pcie_add_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata;
- int ret;
-
- if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
- return 0;
-
- ret = pci_subdev_regulators_add_bus(bus);
- if (ret)
- return ret;
-
- /* Grab the regulators for suspend/resume */
- pcie->sr = bus->dev.driver_data;
-
- /*
- * If we have failed linkup there is no point in returning an error, as
- * currently it will cause a WARNING() from pci_alloc_child_bus().
- * We return 0 and turn on the "refusal_mode" so that any further
- * accesses to the pci_dev just get 0xffffffff
- */
- if (brcm_pcie_linkup(pcie) != 0)
- pcie->refusal_mode = true;
-
- return 0;
-}
-
-static void pci_subdev_regulators_remove_bus(struct pci_bus *bus)
-{
- struct device *dev = &bus->dev;
- struct subdev_regulators *sr = dev->driver_data;
-
- if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
- return;
-
- if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
- dev_err(dev, "failed to disable regulators for downstream device\n");
- dev->driver_data = NULL;
-}
-
/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
@@ -858,18 +751,6 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
/* Accesses to the RC go right to the RC registers if slot==0 */
if (pci_is_root_bus(bus))
return PCI_SLOT(devfn) ? NULL : base + where;
- if (pcie->refusal_mode) {
- /*
- * At this point we do not have link. There will be a CPU
- * abort -- a quirk with this controller --if Linux tries
- * to read any config-space registers besides those
- * targeting the host bridge. To prevent this we hijack
- * the address to point to a safe access that will return
- * 0xffffffff.
- */
- writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI);
- return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3);
- }
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
@@ -898,8 +779,6 @@ static struct pci_ops brcm_pcie_ops = {
.map_bus = brcm_pcie_map_conf,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = brcm_pcie_add_bus,
- .remove_bus = pci_subdev_regulators_remove_bus,
};
static struct pci_ops brcm_pcie_ops32 = {
@@ -1047,9 +926,16 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- int ret, memc;
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ bool ssc_good = false;
+ struct resource *res;
+ int num_out_wins = 0;
+ u16 nlw, cls, lnksta;
+ int i, ret, memc;
u32 tmp, burst, aspm_support;
/* Reset the bridge */
@@ -1139,40 +1025,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-
- /*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
- */
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-
- return 0;
-}
-
-static int brcm_pcie_linkup(struct brcm_pcie *pcie)
-{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct device *dev = pcie->dev;
- void __iomem *base = pcie->base;
- struct resource_entry *entry;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- bool ssc_good = false;
- u32 tmp;
- int ret, i;
-
/* Unassert the fundamental reset */
pcie->perst_set(pcie, 0);
@@ -1223,6 +1075,24 @@ static int brcm_pcie_linkup(struct brcm_pcie *pcie)
num_out_wins++;
}
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1351,21 +1221,9 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
-static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
-{
- bool *ret = data;
-
- if (device_may_wakeup(&dev->dev)) {
- *ret = true;
- dev_info(&dev->dev, "disable cancelled for wake-up device\n");
- }
- return (int) *ret;
-}
-
static int brcm_pcie_suspend(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
@@ -1383,25 +1241,6 @@ static int brcm_pcie_suspend(struct device *dev)
return ret;
}
- if (pcie->sr) {
- /*
- * Now turn off the regulators, but if at least one
- * downstream device is enabled as a wake-up source, do not
- * turn off regulators.
- */
- pcie->ep_wakeup_capable = false;
- pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
- &pcie->ep_wakeup_capable);
- if (!pcie->ep_wakeup_capable) {
- ret = regulator_bulk_disable(pcie->sr->num_supplies,
- pcie->sr->supplies);
- if (ret) {
- dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
- return ret;
- }
- }
- }
clk_disable_unprepare(pcie->clk);
return 0;
@@ -1419,28 +1258,9 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
return ret;
- if (pcie->sr) {
- if (pcie->ep_wakeup_capable) {
- /*
- * We are resuming from a suspend. In the suspend we
- * did not disable the power supplies, so there is
- * no need to enable them (and falsely increase their
- * usage count).
- */
- pcie->ep_wakeup_capable = false;
- } else {
- ret = regulator_bulk_enable(pcie->sr->num_supplies,
- pcie->sr->supplies);
- if (ret) {
- dev_err(dev, "Could not turn on regulators\n");
- goto err_disable_clk;
- }
- }
- }
-
ret = reset_control_reset(pcie->rescal);
if (ret)
- goto err_regulator;
+ goto err_disable_clk;
ret = brcm_phy_start(pcie);
if (ret)
@@ -1461,10 +1281,6 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
- ret = brcm_pcie_linkup(pcie);
- if (ret)
- goto err_reset;
-
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
@@ -1472,9 +1288,6 @@ static int brcm_pcie_resume(struct device *dev)
err_reset:
reset_control_rearm(pcie->rescal);
-err_regulator:
- if (pcie->sr)
- regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1606,17 +1419,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
- ret = pci_host_probe(bridge);
- if (!ret && !brcm_pcie_link_up(pcie))
- ret = -ENODEV;
-
- if (ret) {
- brcm_pcie_remove(pdev);
- return ret;
- }
-
- return 0;
-
+ return pci_host_probe(bridge);
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1625,8 +1428,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend_noirq = brcm_pcie_suspend,
- .resume_noirq = brcm_pcie_resume,
+ .suspend = brcm_pcie_suspend,
+ .resume = brcm_pcie_resume,
};
static struct platform_driver brcm_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index 54b6e6d5bc64..99a99900444d 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -18,7 +18,7 @@
/* NS: CLASS field is R/O, and set to wrong 0x200 value */
static void bcma_pcie2_fixup_class(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index b3e75bc61ff1..2519201b0e51 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -789,14 +789,13 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
return -EFAULT;
}
- /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
+ /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */
#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
-#define PCI_CLASS_BRIDGE_MASK 0xffff00
-#define PCI_CLASS_BRIDGE_SHIFT 8
+#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff
iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, &class);
- class &= ~PCI_CLASS_BRIDGE_MASK;
- class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+ class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK;
+ class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, class);
@@ -1581,7 +1580,7 @@ static void quirk_paxc_bridge(struct pci_dev *pdev)
* code that the bridge is not an Ethernet device.
*/
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
/*
* MPSS is not being set properly (as it is currently 0). This is
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 7705d61fba4c..5d9fd36b02d1 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -292,7 +292,7 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
- val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
+ val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
/* Mask all INTx interrupts */
@@ -838,6 +838,14 @@ static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
if (err)
return err;
+ /*
+ * The controller may have been left out of reset by the bootloader,
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_assert(pcie->phy_reset);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(10, 20);
+
/* Don't touch the hardware registers before power up */
err = mtk_pcie_power_up(pcie);
if (err)
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index ddfbd4aebdec..be8bd919cb88 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1008,6 +1008,7 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
"mediatek,generic-pciecfg");
if (cfg_node) {
pcie->cfg = syscon_node_to_regmap(cfg_node);
+ of_node_put(cfg_node);
if (IS_ERR(pcie->cfg))
return PTR_ERR(pcie->cfg);
}
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 29d8e81e4181..dd5dba419047 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -406,6 +406,7 @@ static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
static void mc_handle_msi(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
@@ -414,8 +415,11 @@ static void mc_handle_msi(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
for_each_set_bit(bit, &status, msi->num_vectors) {
ret = generic_handle_domain_irq(msi->dev_domain, bit);
@@ -424,6 +428,8 @@ static void mc_handle_msi(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
static void mc_msi_bottom_irq_ack(struct irq_data *data)
@@ -432,13 +438,8 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
- unsigned long status;
writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- if (!status)
- writel_relaxed(BIT(PM_MSI_INT_MSI_SHIFT),
- bridge_base_addr + ISTATUS_LOCAL);
}
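On the two microchip hunks above: clearing the aggregate PM_MSI_INT_MSI bit moves from the per-vector ack into mc_handle_msi(), which closes a window where an MSI could be lost. A sketch of the ordering (device behavior assumed):

	/*
	 * old: ack BIT(vector) in ISTATUS_MSI, then clear the aggregate bit
	 *      in ISTATUS_LOCAL if ISTATUS_MSI reads back as zero; an MSI
	 *      landing between those two steps could be dropped.
	 * new: the handler clears the aggregate bit first, then reads and
	 *      acks the vectors; a late MSI simply re-raises the aggregate
	 *      and the handler runs again.
	 */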
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -563,6 +564,7 @@ static int mc_allocate_msi_domains(struct mc_pcie *port)
static void mc_handle_intx(struct irq_desc *desc)
{
struct mc_pcie *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct device *dev = port->dev;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -570,6 +572,8 @@ static void mc_handle_intx(struct irq_desc *desc)
u32 bit;
int ret;
+ chained_irq_enter(chip, desc);
+
status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
if (status & PM_MSI_INT_INTX_MASK) {
status &= PM_MSI_INT_INTX_MASK;
@@ -581,6 +585,8 @@ static void mc_handle_intx(struct irq_desc *desc)
bit);
}
}
+
+ chained_irq_exit(chip, desc);
}
static void mc_ack_intx_irq(struct irq_data *data)
@@ -1115,7 +1121,7 @@ static const struct of_device_id mc_pcie_of_match[] = {
{},
};
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match)
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
static struct platform_driver mc_pcie_driver = {
.probe = pci_host_common_probe,
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 38b6e02edfa9..997c4df6a1e7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -65,6 +65,42 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
+static DEFINE_SPINLOCK(pmsr_lock);
+
+static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
+{
+ unsigned long flags;
+ u32 pmsr, val;
+ int ret = 0;
+
+ spin_lock_irqsave(&pmsr_lock, flags);
+
+ if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ pmsr = readl(pcie_base + PMSR);
+
+ /*
+ * Test if the PCIe controller received PM_ENTER_L1 DLLP and
+ * the PCIe controller is not in L1 link state. If true, apply
+ * fix, which will put the controller into L1 link state, from
+ * which it can return to L0s/L0 on its own.
+ */
+ if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
+ writel(L1IATN, pcie_base + PMCTLR);
+ ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
+ val & L1FAEG, 10, 1000);
+ WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
+ }
+
+unlock_exit:
+ spin_unlock_irqrestore(&pmsr_lock, flags);
+ return ret;
+}
+
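rcar_pcie_wakeup() relies on the iopoll helper to wait for L1FAEG; the general idiom, with hypothetical REG and DONE names:

	u32 val;
	int ret;

	/* Sample every 10 us, give up after 1000 us; safe in atomic context. */
	ret = readl_poll_timeout_atomic(base + REG, val, val & DONE, 10, 1000);
	if (ret)	/* -ETIMEDOUT: DONE never rose within the budget */
		pr_warn("poll timed out\n");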
static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
{
return container_of(msi, struct rcar_pcie_host, msi);
@@ -78,6 +114,54 @@ static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
return val >> shift;
}
+#ifdef CONFIG_ARM
+#define __rcar_pci_rw_reg_workaround(instr) \
+ " .arch armv7-a\n" \
+ "1: " instr " %1, [%2]\n" \
+ "2: isb\n" \
+ "3: .pushsection .text.fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \
+ " b 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection\n"
+#endif
+
+static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val,
+ unsigned int reg)
+{
+ int error = PCIBIOS_SUCCESSFUL;
+#ifdef CONFIG_ARM
+ asm volatile(
+ __rcar_pci_rw_reg_workaround("str")
+ : "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory");
+#else
+ rcar_pci_write_reg(pcie, val, reg);
+#endif
+ return error;
+}
+
+static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val,
+ unsigned int reg)
+{
+ int error = PCIBIOS_SUCCESSFUL;
+#ifdef CONFIG_ARM
+ asm volatile(
+ __rcar_pci_rw_reg_workaround("ldr")
+ : "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory");
+
+ if (error != PCIBIOS_SUCCESSFUL)
+ PCI_SET_ERROR_RESPONSE(val);
+#else
+ *val = rcar_pci_read_reg(pcie, reg);
+#endif
+ return error;
+}
+
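How the exception-table workaround above recovers from a bus abort, as far as the flow can be sketched:

	/*
	 * The __ex_table entries pair each potentially-faulting address
	 * (1b: the ldr/str, 2b: the isb) with the fixup label 4b. When an
	 * access aborts, rcar_pcie_aarch32_abort_handler() calls
	 * fixup_exception(), which looks up the faulting PC in __ex_table
	 * and redirects execution to 4b, where %0 (error) is set to
	 * PCIBIOS_SET_FAILED before branching back to 3b.
	 */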
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
unsigned char access_type, struct pci_bus *bus,
@@ -85,6 +169,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
{
struct rcar_pcie *pcie = &host->pcie;
unsigned int dev, func, reg, index;
+ int ret;
+
+ /* Wake the bus up in case it is in L1 state. */
+ ret = rcar_pcie_wakeup(pcie->dev, pcie->base);
+ if (ret) {
+ PCI_SET_ERROR_RESPONSE(data);
+ return PCIBIOS_SET_FAILED;
+ }
dev = PCI_SLOT(devfn);
func = PCI_FUNC(devfn);
@@ -141,14 +233,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
return PCIBIOS_DEVICE_NOT_FOUND;
if (access_type == RCAR_PCI_ACCESS_READ)
- *data = rcar_pci_read_reg(pcie, PCIECDR);
+ ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR);
else
- rcar_pci_write_reg(pcie, *data, PCIECDR);
+ ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR);
/* Disable the configuration access */
rcar_pci_write_reg(pcie, 0, PCIECCTLR);
- return PCIBIOS_SUCCESSFUL;
+ return ret;
}
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
@@ -370,7 +462,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
* class to match. Hardware takes care of propagating the IDSETR
* settings, so there is no need to bother with a quirk.
*/
- rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1);
/*
* Setup Secondary Bus Number & Subordinate Bus Number, even though
@@ -1050,40 +1142,10 @@ static struct platform_driver rcar_pcie_driver = {
};
#ifdef CONFIG_ARM
-static DEFINE_SPINLOCK(pmsr_lock);
static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
{
- unsigned long flags;
- u32 pmsr, val;
- int ret = 0;
-
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = 1;
- goto unlock_exit;
- }
-
- pmsr = readl(pcie_base + PMSR);
-
- /*
- * Test if the PCIe controller received PM_ENTER_L1 DLLP and
- * the PCIe controller is not in L1 link state. If true, apply
- * fix, which will put the controller into L1 link state, from
- * which it can return to L0s/L0 on its own.
- */
- if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
- writel(L1IATN, pcie_base + PMCTLR);
- ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
- val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
- writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
- }
-
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
- return ret;
+ return !fixup_exception(regs);
}
static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 5fb9ce6e536e..d1a200b93b2b 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -264,8 +264,7 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
/*
* Region 0 is reserved for configuration space and shouldn't
* be used elsewhere per TRM, so leave it out.
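The rockchip-ep change matters because find_first_zero_bit() takes its limit in bits and ob_region_map is a single unsigned long: the old sizeof(ob_region_map) * BITS_PER_LONG (8 * 64 = 512 on 64-bit) let the search run past the end of the bitmap. For example:

	unsigned long map = 0xfUL;	/* outbound regions 0-3 in use */
	u32 r = find_first_zero_bit(&map, BITS_PER_LONG);	/* r == 4 */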
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 45a28880f322..7f56f99b4116 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -370,7 +370,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
- PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
PCIE_RC_CONFIG_RID_CCR);
/* Clear THP cap's next cap pointer to remove L1 substate cap */
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 1650a5087450..32c3a859c26b 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -134,7 +134,6 @@
#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_SCC_SHIFT 16
#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index eb05cceab964..94a14a3d7e55 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -6,7 +6,6 @@
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -813,8 +812,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
* acceptable because the guest is usually CPU-limited and MSI
* remapping doesn't become a performance bottleneck.
*/
- if (iommu_capable(vmd->dev->dev.bus, IOMMU_CAP_INTR_REMAP) ||
- !(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+ if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
offset[0] || offset[1]) {
ret = vmd_alloc_irqs(vmd);
if (ret)
@@ -853,6 +851,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd_attach_resources(vmd);
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ else
+ dev_set_msi_domain(&vmd->bus->dev,
+ dev_get_msi_domain(&vmd->dev->dev));
vmd_acpi_begin();