author		2022-04-19 17:26:01 +0200
committer	2022-04-19 17:26:01 +0200
commit		0aea30a07ec6b50de0fc5f5b2ec34a68ead86b61 (patch)
tree		ee7d7d116570f39e47399c8f691a5a7565077eeb /drivers/pci/controller
parent		ALSA: usb-audio: add mapping for MSI MAG X570S Torpedo MAX. (diff)
parent		firmware: cs_dsp: Fix overrun of unterminated control name string (diff)
download	wireguard-linux-0aea30a07ec6b50de0fc5f5b2ec34a68ead86b61.tar.xz / .zip
Merge tag 'asoc-fix-v5.18-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v5.18
A collection of fixes that came in since the merge window, plus one new
device ID for an x86 laptop. Nothing really stands out or has a
particularly big impact outside of the affected devices.
Diffstat (limited to 'drivers/pci/controller')
26 files changed, 1213 insertions(+), 544 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 601f2531ee91..b8d96d38064d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -10,6 +10,10 @@ config PCI_MVEBU
 	depends on ARM
 	depends on OF
 	select PCI_BRIDGE_EMUL
+	help
+	  Add support for Marvell EBU PCIe controller. This PCIe controller
+	  is used on 32-bit Marvell ARM SoCs: Dove, Kirkwood, Armada 370,
+	  Armada XP, Armada 375, Armada 38x and Armada 39x.
 
 config PCI_AARDVARK
 	tristate "Aardvark PCIe controller"
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 489586a4cdc7..768d33f9ebc8 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -356,8 +356,8 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	const struct j721e_pcie_data *data;
 	struct cdns_pcie *cdns_pcie;
 	struct j721e_pcie *pcie;
-	struct cdns_pcie_rc *rc;
-	struct cdns_pcie_ep *ep;
+	struct cdns_pcie_rc *rc = NULL;
+	struct cdns_pcie_ep *ep = NULL;
 	struct gpio_desc *gpiod;
 	void __iomem *base;
 	struct clk *clk;
@@ -376,6 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	if (!pcie)
 		return -ENOMEM;
 
+	switch (mode) {
+	case PCI_MODE_RC:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+			return -ENODEV;
+
+		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+		if (!bridge)
+			return -ENOMEM;
+
+		if (!data->byte_access_allowed)
+			bridge->ops = &cdns_ti_pcie_host_ops;
+		rc = pci_host_bridge_priv(bridge);
+		rc->quirk_retrain_flag = data->quirk_retrain_flag;
+		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+		cdns_pcie = &rc->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+		break;
+	case PCI_MODE_EP:
+		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+			return -ENODEV;
+
+		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+		if (!ep)
+			return -ENOMEM;
+
+		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+		cdns_pcie = &ep->pcie;
+		cdns_pcie->dev = dev;
+		cdns_pcie->ops = &j721e_pcie_ops;
+		pcie->cdns_pcie = cdns_pcie;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+		return 0;
+	}
+
 	pcie->mode = mode;
 	pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
@@ -426,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 	switch (mode) {
 	case PCI_MODE_RC:
-		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
-			ret = -ENODEV;
-			goto err_get_sync;
-		}
-
-		bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
-		if (!bridge) {
-			ret = -ENOMEM;
-			goto err_get_sync;
-		}
-
-		if (!data->byte_access_allowed)
-			bridge->ops = &cdns_ti_pcie_host_ops;
-		rc = pci_host_bridge_priv(bridge);
-		rc->quirk_retrain_flag = data->quirk_retrain_flag;
-		rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
-		cdns_pcie = &rc->pcie;
-		cdns_pcie->dev = dev;
-		cdns_pcie->ops = &j721e_pcie_ops;
-		pcie->cdns_pcie = cdns_pcie;
-
 		gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
 		if (IS_ERR(gpiod)) {
 			ret = PTR_ERR(gpiod);
@@ -497,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 		break;
 	case PCI_MODE_EP:
-		if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
-			ret = -ENODEV;
-			goto err_get_sync;
-		}
-
-		ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
-		if (!ep) {
-			ret = -ENOMEM;
-			goto err_get_sync;
-		}
-
-		ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
-		cdns_pcie = &ep->pcie;
-		cdns_pcie->dev = dev;
-		cdns_pcie->ops = &j721e_pcie_ops;
-		pcie->cdns_pcie = cdns_pcie;
-
 		ret = cdns_pcie_init_phy(dev, cdns_pcie);
 		if (ret) {
 			dev_err(dev, "Failed to init phy\n");
@@ -525,8 +526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
 			goto err_pcie_setup;
 
 		break;
-	default:
-		dev_err(dev, "INVALID device type %d\n", mode);
 	}
 
 	return 0;
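The j721e rework above moves the RC/EP dispatch ahead of clock and runtime-PM
setup, and relies on IS_ENABLED() so the branch for an unconfigured mode both
fails cleanly at run time and compiles away. A minimal user-space sketch of
that pattern (the CONFIG_ symbols and probe() shape are illustrative
stand-ins, not the driver's API):

	#include <stdio.h>

	/* Stand-in for the kernel's IS_ENABLED(): a compile-time 0/1, so the
	 * dead branch is discarded by the compiler but still type-checked,
	 * unlike an #ifdef block. */
	#define IS_ENABLED(cfg)		(cfg)
	#define CONFIG_EXAMPLE_HOST	1	/* hypothetical: host mode built in */
	#define CONFIG_EXAMPLE_EP	0	/* hypothetical: EP mode disabled */

	enum mode { MODE_RC, MODE_EP };

	static int probe(enum mode mode)
	{
		switch (mode) {
		case MODE_RC:
			if (!IS_ENABLED(CONFIG_EXAMPLE_HOST))
				return -1;	/* -ENODEV in the driver */
			puts("allocate and wire up root-complex state");
			return 0;
		case MODE_EP:
			if (!IS_ENABLED(CONFIG_EXAMPLE_EP))
				return -1;
			puts("allocate and wire up endpoint state");
			return 0;
		}
		return -1;
	}

	int main(void)
	{
		return probe(MODE_RC) ? 1 : 0;
	}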
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6974bd5aa116..6619e3caffe2 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -453,10 +453,6 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 	case IMX7D:
 		break;
 	case IMX8MM:
-		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
-		if (ret)
-			dev_err(dev, "unable to enable pcie_aux clock\n");
-		break;
 	case IMX8MQ:
 		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
 		if (ret) {
@@ -809,9 +805,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 	/* Start LTSSM. */
 	imx6_pcie_ltssm_enable(dev);
 
-	ret = dw_pcie_wait_for_link(pci);
-	if (ret)
-		goto err_reset_phy;
+	dw_pcie_wait_for_link(pci);
 
 	if (pci->link_gen == 2) {
 		/* Allow Gen2 mode after the link is up. */
@@ -847,11 +841,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
 		}
 
 		/* Make sure link training is finished as well! */
-		ret = dw_pcie_wait_for_link(pci);
-		if (ret) {
-			dev_err(dev, "Failed to bring link up!\n");
-			goto err_reset_phy;
-		}
+		dw_pcie_wait_for_link(pci);
 	} else {
 		dev_info(dev, "Link: Gen2 disabled\n");
 	}
@@ -923,6 +913,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
 	/* Others poke directly at IOMUXC registers */
 	switch (imx6_pcie->drvdata->variant) {
 	case IMX6SX:
+	case IMX6QP:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_PM_TURN_OFF,
 				   IMX6SX_GPR12_PCIE_PM_TURN_OFF);
@@ -983,6 +974,7 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
 	case IMX8MM:
 		if (phy_power_off(imx6_pcie->phy))
 			dev_err(dev, "unable to power off PHY\n");
+		phy_exit(imx6_pcie->phy);
 		break;
 	default:
 		break;
@@ -1252,7 +1244,8 @@ static const struct imx6_pcie_drvdata drvdata[] = {
 	[IMX6QP] = {
 		.variant = IMX6QP,
 		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
-			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
 		.dbi_length = 0x200,
 	},
 	[IMX7D] = {
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 1c2ee4e13f1c..d10e5fd0f83c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -531,13 +531,13 @@ static void ks_pcie_quirk(struct pci_dev *dev)
 	struct pci_dev *bridge;
 	static const struct pci_device_id rc_pci_devids[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
-		  .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
-		  .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
-		  .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
-		  .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		  .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
 		{ 0, },
 	};
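Several hunks in this merge (keystone above, and meson, qcom, mobiveil,
aardvark and loongson below) replace the open-coded PCI_CLASS_BRIDGE_PCI << 8
with the single constant PCI_CLASS_BRIDGE_PCI_NORMAL. dev->class packs base
class, sub-class and programming interface into 24 bits, so the two spellings
are the same value; a small stand-alone check (the constants are copied from
my reading of include/linux/pci_ids.h, so treat them as assumptions):

	#include <stdio.h>

	/* Base-class/sub-class pair: 0x06 = bridge, 0x04 = PCI-to-PCI. */
	#define PCI_CLASS_BRIDGE_PCI		0x0604
	/* Full 24-bit class code: bridge/PCI with prog-if 0x00 (normal decode). */
	#define PCI_CLASS_BRIDGE_PCI_NORMAL	0x060400

	int main(void)
	{
		printf("%06x\n", PCI_CLASS_BRIDGE_PCI << 8);	/* 060400 */
		printf("%06x\n", PCI_CLASS_BRIDGE_PCI_NORMAL);	/* 060400 */
		return 0;
	}

The named constant avoids sprinkling the << 8 shift (and the chance of
getting it wrong) across a dozen drivers.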
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 686ded034f22..f44bf347904a 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -313,14 +313,14 @@ static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
 	 * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
 	 * the return value in the config accessors.
 	 */
-	if (where == PCI_CLASS_REVISION && size == 4)
-		*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
-	else if (where == PCI_CLASS_DEVICE && size == 2)
-		*val = PCI_CLASS_BRIDGE_PCI;
-	else if (where == PCI_CLASS_DEVICE && size == 1)
-		*val = PCI_CLASS_BRIDGE_PCI & 0xff;
-	else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
-		*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+	if ((where & ~3) == PCI_CLASS_REVISION) {
+		if (size <= 2)
+			*val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+		*val &= ~0xffffff00;
+		*val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+		if (size <= 2)
+			*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+	}
 
 	return PCIBIOS_SUCCESSFUL;
 }
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index f4755f3a03be..2fa86f32d964 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -362,6 +362,12 @@ int dw_pcie_host_init(struct pcie_port *pp)
 		if (ret < 0)
 			return ret;
 	} else if (pp->has_msi_ctrl) {
+		u32 ctrl, num_ctrls;
+
+		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+		for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+			pp->irq_mask[ctrl] = ~0;
+
 		if (!pp->msi_irq) {
 			pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
 			if (pp->msi_irq < 0) {
@@ -541,7 +547,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 
 	/* Initialize IRQ Status array */
 	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
-		pp->irq_mask[ctrl] = ~0;
 		dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
 				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
 				    pp->irq_mask[ctrl]);
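The reworked meson accessor is compact but subtle: it widens a narrow read
into its byte position within the dword at PCI_CLASS_REVISION, patches the
upper 24 bits (the class code) while preserving the revision ID in bits 7:0,
then narrows back to the requested size. A stand-alone walk-through of the
arithmetic for a 1-byte read at offset 0x0b, i.e. PCI_CLASS_DEVICE + 1
(`where & 3 == 3`, `size == 1`):

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_CLASS_REVISION		0x08
	#define PCI_CLASS_BRIDGE_PCI_NORMAL	0x060400

	int main(void)
	{
		uint32_t val = 0xab;	/* raw byte the hardware returned */
		unsigned int where = 0x0b, size = 1;

		if ((where & ~3) == PCI_CLASS_REVISION) {
			/* widen: place the byte at its offset in the dword */
			if (size <= 2)
				val = (val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
			/* patch bits 31:8 (class code), keep revision in 7:0 */
			val &= ~0xffffff00;
			val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
			/* narrow: extract the requested byte again */
			if (size <= 2)
				val = (val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
		}
		printf("0x%02x\n", val);	/* prints 0x06, the base-class byte */
		return 0;
	}

One if-chain per access size is gone, and any byte/word/dword combination in
that dword now takes the same path.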
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 00cde9a248b5..02cc70d8cc06 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -181,10 +181,59 @@ static int fu740_pcie_start_link(struct dw_pcie *pci)
 {
 	struct device *dev = pci->dev;
 	struct fu740_pcie *afp = dev_get_drvdata(dev);
+	u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+	int ret;
+	u32 orig, tmp;
+
+	/*
+	 * Force 2.5GT/s when starting the link, due to some devices not
+	 * probing at higher speeds. This happens with the PCIe switch
+	 * on the Unmatched board when U-Boot has not initialised the PCIe.
+	 * The fix in U-Boot is to force 2.5GT/s, which then gets cleared
+	 * by the soft reset done by this driver.
+	 */
+	dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+	orig = tmp & PCI_EXP_LNKCAP_SLS;
+	tmp &= ~PCI_EXP_LNKCAP_SLS;
+	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+	dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
 
 	/* Enable LTSSM */
 	writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
-	return 0;
+
+	ret = dw_pcie_wait_for_link(pci);
+	if (ret) {
+		dev_err(dev, "error: link did not start\n");
+		goto err;
+	}
+
+	tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+	if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+		dev_dbg(dev, "changing speed back to original\n");
+
+		tmp &= ~PCI_EXP_LNKCAP_SLS;
+		tmp |= orig;
+		dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp |= PORT_LOGIC_SPEED_CHANGE;
+		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+		ret = dw_pcie_wait_for_link(pci);
+		if (ret) {
+			dev_err(dev, "error: link did not start at new speed\n");
+			goto err;
+		}
+	}
+
+	ret = 0;
+err:
+	WARN_ON(ret);	/* we assume that errors will be very rare */
+	dw_pcie_dbi_ro_wr_dis(pci);
+	return ret;
 }
 
 static int fu740_pcie_host_init(struct pcie_port *pp)
@@ -224,7 +273,7 @@ static int fu740_pcie_host_init(struct pcie_port *pp)
 	/* Clear hold_phy_rst */
 	writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
 	/* Enable pcieauxclk */
-	ret = clk_prepare_enable(afp->pcie_aux);
+	clk_prepare_enable(afp->pcie_aux);
 	/* Set RC mode */
 	writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
@@ -259,11 +308,11 @@ static int fu740_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(afp->mgmt_base);
 
 	/* Fetch GPIOs */
-	afp->reset = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+	afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
 	if (IS_ERR(afp->reset))
 		return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
 
-	afp->pwren = devm_gpiod_get_optional(dev, "pwren-gpios", GPIOD_OUT_LOW);
+	afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
 	if (IS_ERR(afp->pwren))
 		return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
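The fu740 GPIO fix is a naming-convention bug: gpiolib appends "-gpios" to
the con_id when it looks the property up in the device tree, so passing
"reset-gpios" made the core search for a "reset-gpios-gpios" property that
can never exist, and the optional lookup silently returned no GPIO. A
hedged sketch of the correct usage (the DT fragment in the comment is
illustrative, not taken from the fu740 binding):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	/*
	 * Device tree (illustrative):
	 *
	 *     pcie@e00000000 {
	 *             reset-gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
	 *     };
	 *
	 * gpiolib builds the property name as "<con_id>-gpios", so the
	 * lookup for the property above must use con_id "reset":
	 */
	static int example_get_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);
		return 0;
	}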
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index fa6886d66488..a52cad269f85 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -332,9 +332,6 @@ static int hi3660_pcie_phy_init(struct platform_device *pdev,
 	pcie->phy_priv = phy;
 	phy->dev = dev;
 
-	/* registers */
-	pdev = container_of(dev, struct platform_device, dev);
-
 	ret = hi3660_pcie_phy_get_clk(phy);
 	if (ret)
 		return ret;
@@ -756,22 +753,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev)
 	return 0;
 }
 
+struct kirin_pcie_data {
+	enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+	.phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+	.phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
 static const struct of_device_id kirin_pcie_match[] = {
-	{
-		.compatible = "hisilicon,kirin960-pcie",
-		.data = (void *)PCIE_KIRIN_INTERNAL_PHY
-	},
-	{
-		.compatible = "hisilicon,kirin970-pcie",
-		.data = (void *)PCIE_KIRIN_EXTERNAL_PHY
-	},
+	{ .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+	{ .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
 	{},
 };
 
 static int kirin_pcie_probe(struct platform_device *pdev)
 {
-	enum pcie_kirin_phy_type phy_type;
 	struct device *dev = &pdev->dev;
+	const struct kirin_pcie_data *data;
 	struct kirin_pcie *kirin_pcie;
 	struct dw_pcie *pci;
 	int ret;
@@ -781,13 +784,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	phy_type = (long)of_device_get_match_data(dev);
-	if (!phy_type) {
+	data = of_device_get_match_data(dev);
+	if (!data) {
 		dev_err(dev, "OF data missing\n");
 		return -EINVAL;
 	}
 
-
 	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
 	if (!kirin_pcie)
 		return -ENOMEM;
@@ -800,7 +802,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
 	pci->ops = &kirin_dw_pcie_ops;
 	pci->pp.ops = &kirin_pcie_host_ops;
 	kirin_pcie->pci = pci;
-	kirin_pcie->type = phy_type;
+	kirin_pcie->type = data->phy_type;
 
 	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
 	if (ret)
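The kirin change replaces casting an enum through the of_device_id .data
pointer with a proper per-SoC data struct. That removes the "0 means no
match data" ambiguity of the (long) cast and leaves room for more fields
later. The same pattern in miniature (all names hypothetical):

	#include <linux/mod_devicetable.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_soc_data {
		int phy_type;	/* more per-SoC fields can be added here */
	};

	static const struct example_soc_data soc_a_data = { .phy_type = 1 };

	static const struct of_device_id example_match[] = {
		{ .compatible = "vendor,soc-a", .data = &soc_a_data },
		{ /* sentinel */ },
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_soc_data *data;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)	/* NULL is unambiguously "no match data" */
			return -EINVAL;
		return 0;
	}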
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index c19cd506ed3f..6ab90891801d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -161,7 +161,7 @@ struct qcom_pcie_resources_2_3_3 {
 
 /* 6 clocks typically, 7 for sm8250 */
 struct qcom_pcie_resources_2_7_0 {
-	struct clk_bulk_data clks[7];
+	struct clk_bulk_data clks[9];
 	int num_clks;
 	struct regulator_bulk_data supplies[2];
 	struct reset_control *pci_reset;
@@ -195,6 +195,10 @@ struct qcom_pcie_ops {
 struct qcom_pcie_cfg {
 	const struct qcom_pcie_ops *ops;
 	unsigned int pipe_clk_need_muxing:1;
+	unsigned int has_tbu_clk:1;
+	unsigned int has_ddrss_sf_tbu_clk:1;
+	unsigned int has_aggre0_clk:1;
+	unsigned int has_aggre1_clk:1;
 };
 
 struct qcom_pcie {
@@ -204,8 +208,7 @@ struct qcom_pcie {
 	union qcom_pcie_resources res;
 	struct phy *phy;
 	struct gpio_desc *reset;
-	const struct qcom_pcie_ops *ops;
-	unsigned int pipe_clk_need_muxing:1;
+	const struct qcom_pcie_cfg *cfg;
 };
 
 #define to_qcom_pcie(x)	dev_get_drvdata((x)->dev)
@@ -229,8 +232,8 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
 
 	/* Enable Link Training state machine */
-	if (pcie->ops->ltssm_enable)
-		pcie->ops->ltssm_enable(pcie);
+	if (pcie->cfg->ops->ltssm_enable)
+		pcie->cfg->ops->ltssm_enable(pcie);
 
 	return 0;
 }
@@ -1146,6 +1149,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
+	unsigned int idx;
 	int ret;
 
 	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
@@ -1159,24 +1163,28 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
 	if (ret)
 		return ret;
 
-	res->clks[0].id = "aux";
-	res->clks[1].id = "cfg";
-	res->clks[2].id = "bus_master";
-	res->clks[3].id = "bus_slave";
-	res->clks[4].id = "slave_q2a";
-	res->clks[5].id = "tbu";
-	if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) {
-		res->clks[6].id = "ddrss_sf_tbu";
-		res->num_clks = 7;
-	} else {
-		res->num_clks = 6;
-	}
+	idx = 0;
+	res->clks[idx++].id = "aux";
+	res->clks[idx++].id = "cfg";
+	res->clks[idx++].id = "bus_master";
+	res->clks[idx++].id = "bus_slave";
+	res->clks[idx++].id = "slave_q2a";
+	if (pcie->cfg->has_tbu_clk)
+		res->clks[idx++].id = "tbu";
+	if (pcie->cfg->has_ddrss_sf_tbu_clk)
+		res->clks[idx++].id = "ddrss_sf_tbu";
+	if (pcie->cfg->has_aggre0_clk)
+		res->clks[idx++].id = "aggre0";
+	if (pcie->cfg->has_aggre1_clk)
+		res->clks[idx++].id = "aggre1";
+
+	res->num_clks = idx;
 
 	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
 	if (ret < 0)
 		return ret;
 
-	if (pcie->pipe_clk_need_muxing) {
+	if (pcie->cfg->pipe_clk_need_muxing) {
 		res->pipe_clk_src = devm_clk_get(dev, "pipe_mux");
 		if (IS_ERR(res->pipe_clk_src))
 			return PTR_ERR(res->pipe_clk_src);
@@ -1209,7 +1217,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
 	}
 
 	/* Set TCXO as clock source for pcie_pipe_clk_src */
-	if (pcie->pipe_clk_need_muxing)
+	if (pcie->cfg->pipe_clk_need_muxing)
 		clk_set_parent(res->pipe_clk_src, res->ref_clk_src);
 
 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
@@ -1236,6 +1244,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
 		goto err_disable_clocks;
 	}
 
+	/* Wait for reset to complete, required on SM8450 */
+	usleep_range(1000, 1500);
+
 	/* configure PCIe to RC mode */
 	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
 
@@ -1284,7 +1295,7 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
 
 	/* Set pipe clock as clock source for pcie_pipe_clk_src */
-	if (pcie->pipe_clk_need_muxing)
+	if (pcie->cfg->pipe_clk_need_muxing)
 		clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk);
 
 	return clk_prepare_enable(res->pipe_clk);
@@ -1384,7 +1395,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 
 	qcom_ep_reset_assert(pcie);
 
-	ret = pcie->ops->init(pcie);
+	ret = pcie->cfg->ops->init(pcie);
 	if (ret)
 		return ret;
 
@@ -1392,16 +1403,16 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 	if (ret)
 		goto err_deinit;
 
-	if (pcie->ops->post_init) {
-		ret = pcie->ops->post_init(pcie);
+	if (pcie->cfg->ops->post_init) {
+		ret = pcie->cfg->ops->post_init(pcie);
 		if (ret)
 			goto err_disable_phy;
 	}
 
 	qcom_ep_reset_deassert(pcie);
 
-	if (pcie->ops->config_sid) {
-		ret = pcie->ops->config_sid(pcie);
+	if (pcie->cfg->ops->config_sid) {
+		ret = pcie->cfg->ops->config_sid(pcie);
 		if (ret)
 			goto err;
 	}
@@ -1410,12 +1421,12 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
 
 err:
 	qcom_ep_reset_assert(pcie);
-	if (pcie->ops->post_deinit)
-		pcie->ops->post_deinit(pcie);
+	if (pcie->cfg->ops->post_deinit)
+		pcie->cfg->ops->post_deinit(pcie);
 err_disable_phy:
 	phy_power_off(pcie->phy);
 err_deinit:
-	pcie->ops->deinit(pcie);
+	pcie->cfg->ops->deinit(pcie);
 
 	return ret;
 }
@@ -1509,14 +1520,33 @@ static const struct qcom_pcie_cfg ipq4019_cfg = {
 
 static const struct qcom_pcie_cfg sdm845_cfg = {
 	.ops = &ops_2_7_0,
+	.has_tbu_clk = true,
 };
 
 static const struct qcom_pcie_cfg sm8250_cfg = {
 	.ops = &ops_1_9_0,
+	.has_tbu_clk = true,
+	.has_ddrss_sf_tbu_clk = true,
+};
+
+static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
+	.ops = &ops_1_9_0,
+	.has_ddrss_sf_tbu_clk = true,
+	.pipe_clk_need_muxing = true,
+	.has_aggre0_clk = true,
+	.has_aggre1_clk = true,
+};
+
+static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
+	.ops = &ops_1_9_0,
+	.has_ddrss_sf_tbu_clk = true,
+	.pipe_clk_need_muxing = true,
+	.has_aggre1_clk = true,
 };
 
 static const struct qcom_pcie_cfg sc7280_cfg = {
 	.ops = &ops_1_9_0,
+	.has_tbu_clk = true,
 	.pipe_clk_need_muxing = true,
 };
 
@@ -1559,8 +1589,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 	pcie->pci = pci;
 
-	pcie->ops = pcie_cfg->ops;
-	pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing;
+	pcie->cfg = pcie_cfg;
 
 	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
 	if (IS_ERR(pcie->reset)) {
@@ -1586,7 +1615,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		goto err_pm_runtime_put;
 	}
 
-	ret = pcie->ops->get_resources(pcie);
+	ret = pcie->cfg->ops->get_resources(pcie);
 	if (ret)
 		goto err_pm_runtime_put;
 
@@ -1628,13 +1657,15 @@ static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
 	{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
 	{ .compatible = "qcom,pcie-sc8180x", .data = &sm8250_cfg },
+	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
+	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
 	{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
 	{ }
 };
 
 static void qcom_fixup_class(struct pci_dev *dev)
 {
-	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
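The qcom rework above trades compatible-string checks for capability flags in
the per-SoC config and then builds the clk_bulk list incrementally, so each
new SoC only flips flags instead of growing an if/else ladder. A reduced
sketch of that incremental-list idiom (flag and clock names follow the diff;
the surrounding types are simplified stand-ins):

	#include <linux/clk.h>
	#include <linux/device.h>

	struct example_cfg {
		unsigned int has_tbu_clk:1;
		unsigned int has_ddrss_sf_tbu_clk:1;
	};

	static int example_get_clocks(struct device *dev,
				      const struct example_cfg *cfg,
				      struct clk_bulk_data *clks)
	{
		unsigned int idx = 0;

		/* Mandatory clocks first... */
		clks[idx++].id = "aux";
		clks[idx++].id = "cfg";
		/* ...then optional ones, gated by per-SoC capability flags. */
		if (cfg->has_tbu_clk)
			clks[idx++].id = "tbu";
		if (cfg->has_ddrss_sf_tbu_clk)
			clks[idx++].id = "ddrss_sf_tbu";

		return devm_clk_bulk_get(dev, idx, clks);
	}

The caller records idx as num_clks, so enable/disable paths always operate on
exactly the clocks this SoC declared.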
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 69810c6b0d58..4d0a587c0ba5 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/iopoll.h>
 #include <linux/of_device.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
@@ -31,6 +32,17 @@
 #define PCL_RSTCTRL2			0x0024
 #define PCL_RSTCTRL_PHY_RESET		BIT(0)
 
+#define PCL_PINCTRL0			0x002c
+#define PCL_PERST_PLDN_REGEN		BIT(12)
+#define PCL_PERST_NOE_REGEN		BIT(11)
+#define PCL_PERST_OUT_REGEN		BIT(8)
+#define PCL_PERST_PLDN_REGVAL		BIT(4)
+#define PCL_PERST_NOE_REGVAL		BIT(3)
+#define PCL_PERST_OUT_REGVAL		BIT(0)
+
+#define PCL_PIPEMON			0x0044
+#define PCL_PCLK_ALIVE			BIT(15)
+
 #define PCL_MODE			0x8000
 #define PCL_MODE_REGEN			BIT(8)
 #define PCL_MODE_REGVAL			BIT(0)
@@ -51,6 +63,9 @@
 #define PCL_APP_INTX			0x8074
 #define PCL_APP_INTX_SYS_INT		BIT(0)
 
+#define PCL_APP_PM0			0x8078
+#define PCL_SYS_AUX_PWR_DET		BIT(8)
+
 /* assertion time of INTx in usec */
 #define PCL_INTX_WIDTH_USEC		30
 
@@ -60,7 +75,14 @@ struct uniphier_pcie_ep_priv {
 	struct clk *clk, *clk_gio;
 	struct reset_control *rst, *rst_gio;
 	struct phy *phy;
-	const struct pci_epc_features *features;
+	const struct uniphier_pcie_ep_soc_data *data;
+};
+
+struct uniphier_pcie_ep_soc_data {
+	bool has_gio;
+	void (*init)(struct uniphier_pcie_ep_priv *priv);
+	int (*wait)(struct uniphier_pcie_ep_priv *priv);
+	const struct pci_epc_features features;
 };
 
 #define to_uniphier_pcie(x)	dev_get_drvdata((x)->dev)
@@ -91,7 +113,7 @@ static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
 	writel(val, priv->base + PCL_RSTCTRL2);
 }
 
-static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
+static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv)
 {
 	u32 val;
 
@@ -116,6 +138,55 @@ static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
 	msleep(100);
 }
 
+static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+	u32 val;
+
+	/* set EP mode */
+	val = readl(priv->base + PCL_MODE);
+	val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+	writel(val, priv->base + PCL_MODE);
+
+	/* use auxiliary power detection */
+	val = readl(priv->base + PCL_APP_PM0);
+	val |= PCL_SYS_AUX_PWR_DET;
+	writel(val, priv->base + PCL_APP_PM0);
+
+	/* assert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+		 | PCL_PERST_PLDN_REGVAL);
+	val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+		| PCL_PERST_PLDN_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+
+	usleep_range(100000, 200000);
+
+	/* deassert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv)
+{
+	u32 status;
+	int ret;
+
+	/* wait PIPE clock */
+	ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+				 status & PCL_PCLK_ALIVE, 100000, 1000000);
+	if (ret) {
+		dev_err(priv->pci.dev,
+			"Failed to initialize controller in EP mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int uniphier_pcie_start_link(struct dw_pcie *pci)
 {
 	struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -209,7 +280,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
 
-	return priv->features;
+	return &priv->data->features;
 }
 
 static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
@@ -238,7 +309,8 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
 	if (ret)
 		goto out_rst_assert;
 
-	uniphier_pcie_init_ep(priv);
+	if (priv->data->init)
+		priv->data->init(priv);
 
 	uniphier_pcie_phy_reset(priv, true);
 
@@ -248,8 +320,16 @@ static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
 
 	uniphier_pcie_phy_reset(priv, false);
 
+	if (priv->data->wait) {
+		ret = priv->data->wait(priv);
+		if (ret)
+			goto out_phy_exit;
+	}
+
 	return 0;
 
+out_phy_exit:
+	phy_exit(priv->phy);
 out_rst_gio_assert:
 	reset_control_assert(priv->rst_gio);
 out_rst_assert:
@@ -277,8 +357,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->features = of_device_get_match_data(dev);
-	if (WARN_ON(!priv->features))
+	priv->data = of_device_get_match_data(dev);
+	if (WARN_ON(!priv->data))
 		return -EINVAL;
 
 	priv->pci.dev = dev;
@@ -288,13 +368,15 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
-	priv->clk_gio = devm_clk_get(dev, "gio");
-	if (IS_ERR(priv->clk_gio))
-		return PTR_ERR(priv->clk_gio);
+	if (priv->data->has_gio) {
+		priv->clk_gio = devm_clk_get(dev, "gio");
+		if (IS_ERR(priv->clk_gio))
+			return PTR_ERR(priv->clk_gio);
 
-	priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
-	if (IS_ERR(priv->rst_gio))
-		return PTR_ERR(priv->rst_gio);
+		priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+		if (IS_ERR(priv->rst_gio))
+			return PTR_ERR(priv->rst_gio);
+	}
 
 	priv->clk = devm_clk_get(dev, "link");
 	if (IS_ERR(priv->clk))
@@ -321,13 +403,31 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
 	return dw_pcie_ep_init(&priv->pci.ep);
 }
 
-static const struct pci_epc_features uniphier_pro5_data = {
-	.linkup_notifier = false,
-	.msi_capable = true,
-	.msix_capable = false,
-	.align = 1 << 16,
-	.bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
-	.reserved_bar = BIT(BAR_4),
+static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
+	.has_gio = true,
+	.init = uniphier_pcie_pro5_init_ep,
+	.wait = NULL,
+	.features = {
+		.linkup_notifier = false,
+		.msi_capable = true,
+		.msix_capable = false,
+		.align = 1 << 16,
+		.bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+		.reserved_bar = BIT(BAR_4),
+	},
+};
+
+static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
+	.has_gio = false,
+	.init = uniphier_pcie_nx1_init_ep,
+	.wait = uniphier_pcie_nx1_wait_ep,
+	.features = {
+		.linkup_notifier = false,
+		.msi_capable = true,
+		.msix_capable = false,
+		.align = 1 << 12,
+		.bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+	},
 };
 
 static const struct of_device_id uniphier_pcie_ep_match[] = {
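The uniphier-ep change generalizes the single pci_epc_features pointer into a
SoC-data struct with optional init/wait hooks that callers simply skip when
NULL, plus a readl_poll_timeout() wait for the PIPE clock on NX1. A minimal
sketch of the optional-hook dispatch (types abbreviated to plain C; not the
driver's actual structures):

	struct soc_hooks {
		void (*init)(void *priv);	/* NULL if the SoC needs none */
		int  (*wait)(void *priv);	/* NULL if nothing to poll */
	};

	static int enable_controller(const struct soc_hooks *hooks, void *priv)
	{
		if (hooks->init)
			hooks->init(priv);

		/* ...reset and clock sequencing would go here... */

		if (hooks->wait)
			return hooks->wait(priv);
		return 0;
	}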
@@ -335,6 +435,10 @@ static const struct of_device_id uniphier_pcie_ep_match[] = {
 	{
 		.compatible = "socionext,uniphier-pro5-pcie-ep",
 		.data = &uniphier_pro5_data,
 	},
+	{
+		.compatible = "socionext,uniphier-nx1-pcie-ep",
+		.data = &uniphier_nx1_data,
+	},
 	{ /* sentinel */ },
 };
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index f3547aa60140..31a7bdebe540 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -295,7 +295,7 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
 	/* fixup for PCIe class register */
 	value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
 	value &= 0xff;
-	value |= (PCI_CLASS_BRIDGE_PCI << 16);
+	value |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
 	mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
 
 	return 0;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 4f5b44827d21..09d9bf465d72 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -38,10 +38,6 @@
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN	BIT(6)
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK		BIT(7)
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV	BIT(8)
-#define PCIE_CORE_INT_A_ASSERT_ENABLE		1
-#define PCIE_CORE_INT_B_ASSERT_ENABLE		2
-#define PCIE_CORE_INT_C_ASSERT_ENABLE		3
-#define PCIE_CORE_INT_D_ASSERT_ENABLE		4
 
 /* PIO registers base address and register offsets */
 #define PIO_BASE_ADDR				0x4000
 #define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
@@ -102,6 +98,10 @@
 #define PCIE_MSG_PM_PME_MASK			BIT(7)
 #define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
 #define PCIE_ISR0_MSI_INT_PENDING		BIT(24)
+#define PCIE_ISR0_CORR_ERR			BIT(11)
+#define PCIE_ISR0_NFAT_ERR			BIT(12)
+#define PCIE_ISR0_FAT_ERR			BIT(13)
+#define PCIE_ISR0_ERR_MASK			GENMASK(13, 11)
 #define PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
 #define PCIE_ISR0_INTX_DEASSERT(val)		BIT(20 + (val))
 #define PCIE_ISR0_ALL_MASK			GENMASK(31, 0)
@@ -272,17 +272,16 @@ struct advk_pcie {
 		u32 actions;
 	} wins[OB_WIN_COUNT];
 	u8 wins_count;
+	int irq;
+	struct irq_domain *rp_irq_domain;
 	struct irq_domain *irq_domain;
 	struct irq_chip irq_chip;
 	raw_spinlock_t irq_lock;
 	struct irq_domain *msi_domain;
 	struct irq_domain *msi_inner_domain;
-	struct irq_chip msi_bottom_irq_chip;
-	struct irq_chip msi_irq_chip;
-	struct msi_domain_info msi_domain_info;
+	raw_spinlock_t msi_irq_lock;
 	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
 	struct mutex msi_used_lock;
-	u16 msi_msg;
 	int link_gen;
 	struct pci_bridge_emul bridge;
 	struct gpio_desc *reset_gpio;
@@ -477,6 +476,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
 
 static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 {
+	phys_addr_t msi_addr;
 	u32 reg;
 	int i;
 
@@ -529,7 +529,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	 */
 	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
 	reg &= ~0xffffff00;
-	reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+	reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
 	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
 
 	/* Disable Root Bridge I/O space, memory space and bus mastering */
@@ -565,6 +565,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg |= LANE_COUNT_1;
 	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
 
+	/* Set MSI address */
+	msi_addr = virt_to_phys(pcie);
+	advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
+	advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
+
 	/* Enable MSI */
 	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
 	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -576,15 +581,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
 	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
 
-	/* Disable All ISR0/1 Sources */
-	reg = PCIE_ISR0_ALL_MASK;
+	/* Disable All ISR0/1 and MSI Sources */
+	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+
+	/* Unmask summary MSI interrupt */
+	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
 	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
 	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
 
-	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
-
-	/* Unmask all MSIs */
-	advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+	/* Unmask PME interrupt for processing of PME requester */
+	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+	reg &= ~PCIE_MSG_PM_PME_MASK;
+	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
 
 	/* Enable summary interrupt for GIC SPI source */
 	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -778,11 +788,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
 	case PCI_INTERRUPT_LINE: {
 		/*
 		 * From the whole 32bit register we support reading from HW only
-		 * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+		 * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
 		 * Other bits are retrieved only from emulated config buffer.
 		 */
 		__le32 *cfgspace = (__le32 *)&bridge->conf;
 		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+		if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
+			val &= ~(PCI_BRIDGE_CTL_SERR << 16);
+		else
+			val |= PCI_BRIDGE_CTL_SERR << 16;
 		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
 			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
 		else
@@ -808,6 +822,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
 		break;
 
 	case PCI_INTERRUPT_LINE:
+		/*
+		 * According to Figure 6-3: Pseudo Logic Diagram for Error
+		 * Message Controls in PCIe base specification, SERR# Enable bit
+		 * in Bridge Control register enable receiving of ERR_* messages
+		 */
+		if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
+			u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+			if (new & (PCI_BRIDGE_CTL_SERR << 16))
+				val &= ~PCIE_ISR0_ERR_MASK;
+			else
+				val |= PCIE_ISR0_ERR_MASK;
+			advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+		}
 		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
 			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
 			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
@@ -835,20 +862,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
 		*value = PCI_EXP_SLTSTA_PDS << 16;
 		return PCI_BRIDGE_EMUL_HANDLED;
 
-	case PCI_EXP_RTCTL: {
-		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
-		*value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
-		*value |= PCI_EXP_RTCAP_CRSVIS << 16;
-		return PCI_BRIDGE_EMUL_HANDLED;
-	}
-
-	case PCI_EXP_RTSTA: {
-		u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
-		u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
-		*value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
-		return PCI_BRIDGE_EMUL_HANDLED;
-	}
+	/*
+	 * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
+	 * to be handled here, because their values are stored in emulated
+	 * config space buffer, and we read them from there when needed.
+	 */
 
 	case PCI_EXP_LNKCAP: {
 		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
@@ -903,19 +921,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
 		break;
 
 	case PCI_EXP_RTCTL: {
-		/* Only mask/unmask PME interrupt */
-		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
-			~PCIE_MSG_PM_PME_MASK;
-		if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
-			val |= PCIE_MSG_PM_PME_MASK;
-		advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+		u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
+		/* Only emulation of PMEIE and CRSSVE bits is provided */
+		rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+		bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
 		break;
 	}
 
-	case PCI_EXP_RTSTA:
-		new = (new & PCI_EXP_RTSTA_PME) >> 9;
-		advk_writel(pcie, new, PCIE_ISR0_REG);
-		break;
+	/*
+	 * PCI_EXP_RTSTA is also supported, but does not need to be handled
+	 * here, because its value is stored in emulated config space buffer,
+	 * and we write it there when needed.
+	 */
 
 	case PCI_EXP_DEVCTL:
 	case PCI_EXP_DEVCTL2:
@@ -928,7 +945,7 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
 	}
 }
 
-static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
 	.read_base = advk_pci_bridge_emul_base_conf_read,
 	.write_base = advk_pci_bridge_emul_base_conf_write,
 	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
@@ -959,7 +976,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
 	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
 
 	/* Support interrupt A for MSI feature */
-	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+	bridge->conf.intpin = PCI_INTERRUPT_INTA;
 
 	/* Aardvark HW provides PCIe Capability structure in version 2 */
 	bridge->pcie_conf.cap = cpu_to_le16(2);
@@ -981,8 +998,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
 		return false;
 
 	/*
-	 * If the link goes down after we check for link-up, nothing bad
-	 * happens but the config access times out.
+	 * If the link goes down after we check for link-up, we have a problem:
+	 * if a PIO request is executed while link-down, the whole controller
+	 * gets stuck in a non-functional state, and even after link comes up
+	 * again, PIO requests won't work anymore, and a reset of the whole PCIe
+	 * controller is needed. Therefore we need to prevent sending PIO
+	 * requests while the link is down.
 	 */
 	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
 		return false;
@@ -1180,11 +1201,11 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
 					 struct msi_msg *msg)
 {
 	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
-	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
+	phys_addr_t msi_addr = virt_to_phys(pcie);
 
-	msg->address_lo = lower_32_bits(msi_msg);
-	msg->address_hi = upper_32_bits(msi_msg);
-	msg->data = data->irq;
+	msg->address_lo = lower_32_bits(msi_addr);
+	msg->address_hi = upper_32_bits(msi_addr);
+	msg->data = data->hwirq;
 }
 
 static int advk_msi_set_affinity(struct irq_data *irq_data,
@@ -1193,6 +1214,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data,
 	return -EINVAL;
 }
 
+static void advk_msi_irq_mask(struct irq_data *d)
+{
+	struct advk_pcie *pcie = d->domain->host_data;
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 mask;
+
+	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+	mask |= BIT(hwirq);
+	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_irq_unmask(struct irq_data *d)
+{
+	struct advk_pcie *pcie = d->domain->host_data;
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 mask;
+
+	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
+	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+	mask &= ~BIT(hwirq);
+	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
+	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
+}
+
+static void advk_msi_top_irq_mask(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void advk_msi_top_irq_unmask(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip advk_msi_bottom_irq_chip = {
+	.name = "MSI",
+	.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
+	.irq_set_affinity = advk_msi_set_affinity,
+	.irq_mask = advk_msi_irq_mask,
+	.irq_unmask = advk_msi_irq_unmask,
+};
+
 static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
 				     unsigned int virq,
 				     unsigned int nr_irqs, void *args)
@@ -1201,19 +1270,15 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
 	int hwirq, i;
 
 	mutex_lock(&pcie->msi_used_lock);
-	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
-					   0, nr_irqs, 0);
-	if (hwirq >= MSI_IRQ_NUM) {
-		mutex_unlock(&pcie->msi_used_lock);
-		return -ENOSPC;
-	}
-
-	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+					order_base_2(nr_irqs));
 	mutex_unlock(&pcie->msi_used_lock);
+	if (hwirq < 0)
+		return -ENOSPC;
 
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_set_info(domain, virq + i, hwirq + i,
-				    &pcie->msi_bottom_irq_chip,
+				    &advk_msi_bottom_irq_chip,
 				    domain->host_data, handle_simple_irq,
 				    NULL, NULL);
 
@@ -1227,7 +1292,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain,
 	struct advk_pcie *pcie = domain->host_data;
 
 	mutex_lock(&pcie->msi_used_lock);
-	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
 	mutex_unlock(&pcie->msi_used_lock);
 }
 
@@ -1269,7 +1334,6 @@ static int advk_pcie_irq_map(struct irq_domain *h,
 {
 	struct advk_pcie *pcie = h->host_data;
 
-	advk_pcie_irq_mask(irq_get_irq_data(virq));
 	irq_set_status_flags(virq, IRQ_LEVEL);
 	irq_set_chip_and_handler(virq, &pcie->irq_chip, handle_level_irq);
 
@@ -1283,37 +1347,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
 	.xlate = irq_domain_xlate_onecell,
 };
 
+static struct irq_chip advk_msi_irq_chip = {
+	.name = "advk-MSI",
+	.irq_mask = advk_msi_top_irq_mask,
+	.irq_unmask = advk_msi_top_irq_unmask,
+};
+
+static struct msi_domain_info advk_msi_domain_info = {
+	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+	.chip = &advk_msi_irq_chip,
+};
+
 static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 {
 	struct device *dev = &pcie->pdev->dev;
-	struct device_node *node = dev->of_node;
-	struct irq_chip *bottom_ic, *msi_ic;
-	struct msi_domain_info *msi_di;
-	phys_addr_t msi_msg_phys;
 
+	raw_spin_lock_init(&pcie->msi_irq_lock);
 	mutex_init(&pcie->msi_used_lock);
 
-	bottom_ic = &pcie->msi_bottom_irq_chip;
-
-	bottom_ic->name = "MSI";
-	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
-	bottom_ic->irq_set_affinity = advk_msi_set_affinity;
-
-	msi_ic = &pcie->msi_irq_chip;
-	msi_ic->name = "advk-MSI";
-
-	msi_di = &pcie->msi_domain_info;
-	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-		MSI_FLAG_MULTI_PCI_MSI;
-	msi_di->chip = msi_ic;
-
-	msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
-	advk_writel(pcie, lower_32_bits(msi_msg_phys),
-		    PCIE_MSI_ADDR_LOW_REG);
-	advk_writel(pcie, upper_32_bits(msi_msg_phys),
-		    PCIE_MSI_ADDR_HIGH_REG);
-
 	pcie->msi_inner_domain =
 		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
 				      &advk_msi_domain_ops, pcie);
@@ -1321,8 +1373,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
 		return -ENOMEM;
 
 	pcie->msi_domain =
-		pci_msi_create_irq_domain(of_node_to_fwnode(node),
-					  msi_di, pcie->msi_inner_domain);
+		pci_msi_create_irq_domain(dev_fwnode(dev),
+					  &advk_msi_domain_info,
+					  pcie->msi_inner_domain);
 	if (!pcie->msi_domain) {
 		irq_domain_remove(pcie->msi_inner_domain);
 		return -ENOMEM;
 	}
@@ -1363,7 +1416,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
 	}
 
 	irq_chip->irq_mask = advk_pcie_irq_mask;
-	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
 	irq_chip->irq_unmask = advk_pcie_irq_unmask;
 
 	pcie->irq_domain =
@@ -1385,10 +1437,73 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
 	irq_domain_remove(pcie->irq_domain);
 }
 
+static struct irq_chip advk_rp_irq_chip = {
+	.name = "advk-RP",
+};
+
+static int advk_pcie_rp_irq_map(struct irq_domain *h,
+				unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct advk_pcie *pcie = h->host_data;
+
+	irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
+	irq_set_chip_data(virq, pcie);
+
+	return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
+	.map = advk_pcie_rp_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
+{
+	pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
+						    &advk_pcie_rp_irq_domain_ops,
+						    pcie);
+	if (!pcie->rp_irq_domain) {
+		dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
+{
+	irq_domain_remove(pcie->rp_irq_domain);
+}
+
+static void advk_pcie_handle_pme(struct advk_pcie *pcie)
+{
+	u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
+
+	advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
+
+	/*
+	 * PCIE_MSG_LOG_REG contains the last inbound message, so store
+	 * the requester ID only when PME was not asserted yet.
+	 * Also do not trigger PME interrupt when PME is still asserted.
+	 */
+	if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
+		pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
+
+		/*
+		 * Trigger PME interrupt only if PMEIE bit in Root Control is set.
+		 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
+		 */
+		if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
+			return;
+
+		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+			dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
+	}
+}
+
 static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 {
 	u32 msi_val, msi_mask, msi_status, msi_idx;
-	u16 msi_data;
 
 	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
 	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
@@ -1398,13 +1513,9 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 		if (!(BIT(msi_idx) & msi_status))
 			continue;
 
-		/*
-		 * msi_idx contains bits [4:0] of the msi_data and msi_data
-		 * contains 16bit MSI interrupt number
-		 */
 		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
-		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
-		generic_handle_irq(msi_data);
+		if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
+			dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
 	}
 
 	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
@@ -1425,6 +1536,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
 	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
 	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
 
+	/* Process PME interrupt as the first one to do not miss PME requester id */
+	if (isr0_status & PCIE_MSG_PM_PME_MASK)
+		advk_pcie_handle_pme(pcie);
+
+	/* Process ERR interrupt */
+	if (isr0_status & PCIE_ISR0_ERR_MASK) {
+		advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
+
+		/*
+		 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
+		 * PCIe interrupt 0
+		 */
+		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
+			dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
+	}
+
 	/* Process MSI interrupts */
 	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
 		advk_pcie_handle_msi(pcie);
@@ -1437,28 +1564,50 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
 
 			advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
 				    PCIE_ISR1_REG);
 
-		generic_handle_domain_irq(pcie->irq_domain, i);
+		if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
+			dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
+					    (char)i + 'A');
 	}
 }
 
-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+static void advk_pcie_irq_handler(struct irq_desc *desc)
 {
-	struct advk_pcie *pcie = arg;
-	u32 status;
+	struct advk_pcie *pcie = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 val, mask, status;
 
-	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
-	if (!(status & PCIE_IRQ_CORE_INT))
-		return IRQ_NONE;
+	chained_irq_enter(chip, desc);
 
-	advk_pcie_handle_int(pcie);
+	val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+	mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG);
+	status = val & ((~mask) & PCIE_IRQ_ALL_MASK);
 
-	/* Clear interrupt */
-	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+	if (status & PCIE_IRQ_CORE_INT) {
+		advk_pcie_handle_int(pcie);
 
-	return IRQ_HANDLED;
+		/* Clear interrupt */
+		advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct advk_pcie *pcie = dev->bus->sysdata;
+
+	/*
+	 * Emulated root bridge has its own emulated irq chip and irq domain.
+	 * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
+	 * hwirq for irq_create_mapping() is indexed from zero.
+	 */
+	if (pci_is_root_bus(dev->bus))
+		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
+	else
+		return of_irq_parse_and_map_pci(dev, slot, pin);
 }
 
-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
+static void advk_pcie_disable_phy(struct advk_pcie *pcie)
 {
 	phy_power_off(pcie->phy);
 	phy_exit(pcie->phy);
@@ -1482,9 +1631,7 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)
 	}
 
 	ret = phy_power_on(pcie->phy);
-	if (ret == -EOPNOTSUPP) {
-		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
-	} else if (ret) {
+	if (ret) {
 		phy_exit(pcie->phy);
 		return ret;
 	}
@@ -1522,7 +1669,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	struct advk_pcie *pcie;
 	struct pci_host_bridge *bridge;
 	struct resource_entry *entry;
-	int ret, irq;
+	int ret;
 
 	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
 	if (!bridge)
@@ -1608,17 +1755,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(pcie->base))
 		return PTR_ERR(pcie->base);
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		return irq;
-
-	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
-			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
-			       pcie);
-	if (ret) {
-		dev_err(dev, "Failed to register interrupt\n");
-		return ret;
-	}
+	pcie->irq = platform_get_irq(pdev, 0);
+	if (pcie->irq < 0)
+		return pcie->irq;
 
 	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
 						       "reset-gpios", 0,
@@ -1667,11 +1806,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = advk_pcie_init_rp_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to initialize irq\n");
+		advk_pcie_remove_msi_irq_domain(pcie);
+		advk_pcie_remove_irq_domain(pcie);
+		return ret;
+	}
+
+	irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie);
+
 	bridge->sysdata = pcie;
 	bridge->ops = &advk_pcie_ops;
+	bridge->map_irq = advk_pcie_map_irq;
 
 	ret = pci_host_probe(bridge);
 	if (ret < 0) {
+		irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+		advk_pcie_remove_rp_irq_domain(pcie);
 		advk_pcie_remove_msi_irq_domain(pcie);
 		advk_pcie_remove_irq_domain(pcie);
 		return ret;
@@ -1719,7 +1871,11 @@ static int advk_pcie_remove(struct platform_device *pdev)
 	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
 	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
 
+	/* Remove IRQ handler */
+	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+
 	/* Remove IRQ domains */
+	advk_pcie_remove_rp_irq_domain(pcie);
 	advk_pcie_remove_msi_irq_domain(pcie);
 	advk_pcie_remove_irq_domain(pcie);
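Two details of the aardvark MSI rework deserve a note. First, the allocator
switches from bitmap_find_next_zero_area() to bitmap_find_free_region(),
because PCI multi-MSI hands a device a naturally aligned power-of-two block
of vectors: the device ORs the vector index into the low bits of one MSI data
value, so the base vector must have those low bits clear. Second, the
doorbell address becomes virt_to_phys(pcie) rather than a dedicated field;
the device only ever writes to whatever address is programmed into
PCIE_MSI_ADDR_*, so any driver-owned physical address works. A small
stand-alone illustration of the block-size rounding (order_base_2() here is
a simplified stand-in for the kernel helper):

	#include <stdio.h>

	/* order_base_2(n) ~ ceil(log2(n)); e.g. 3 MSIs -> order 2 -> block of 4 */
	static unsigned int order_base_2(unsigned int n)
	{
		unsigned int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		for (unsigned int nr_irqs = 1; nr_irqs <= 8; nr_irqs++)
			printf("%u requested -> block of %u vectors\n",
			       nr_irqs, 1u << order_base_2(nr_irqs));
		return 0;
	}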
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 20ea2ee330b8..558b35aba610 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -616,6 +616,121 @@ static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
 {
 	return pci_msi_prepare(domain, dev, nvec, info);
 }
+
+/**
+ * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build new a destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+	struct hv_retarget_device_interrupt *params;
+	struct hv_pcibus_device *hbus;
+	struct cpumask *dest;
+	cpumask_var_t tmp;
+	struct pci_bus *pbus;
+	struct pci_dev *pdev;
+	unsigned long flags;
+	u32 var_size = 0;
+	int cpu, nr_bank;
+	u64 res;
+
+	dest = irq_data_get_effective_affinity_mask(data);
+	pdev = msi_desc_to_pci_dev(msi_desc);
+	pbus = pdev->bus;
+	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+
+	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+	params = &hbus->retarget_msi_interrupt_params;
+	memset(params, 0, sizeof(*params));
+	params->partition_id = HV_PARTITION_ID_SELF;
+	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
+	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
+	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+			   (hbus->hdev->dev_instance.b[4] << 16) |
+			   (hbus->hdev->dev_instance.b[7] << 8) |
+			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
+			   PCI_FUNC(pdev->devfn);
+	params->int_target.vector = hv_msi_get_int_vector(data);
+
+	/*
+	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
+	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+	 * spurious interrupt storm. Not doing so does not seem to have a
+	 * negative effect (yet?).
+	 */
+
+	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+		/*
+		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+		 * with >64 VP support.
+		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+		 * is not sufficient for this hypercall.
+		 */
+		params->int_target.flags |=
+			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+			res = 1;
+			goto exit_unlock;
+		}
+
+		cpumask_and(tmp, dest, cpu_online_mask);
+		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+		free_cpumask_var(tmp);
+
+		if (nr_bank <= 0) {
+			res = 1;
+			goto exit_unlock;
+		}
+
+		/*
+		 * var-sized hypercall, var-size starts after vp_mask (thus
+		 * vp_set.format does not count, but vp_set.valid_bank_mask
+		 * does).
+		 */
+		var_size = 1 + nr_bank;
+	} else {
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			params->int_target.vp_mask |=
+				(1ULL << hv_cpu_number_to_vp_number(cpu));
+		}
+	}
+
+	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+			      params, NULL);
+
+exit_unlock:
+	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+
+	/*
+	 * During hibernation, when a CPU is offlined, the kernel tries
+	 * to move the interrupt to the remaining CPUs that haven't
+	 * been offlined yet. In this case, the below hv_do_hypercall()
+	 * always fails since the vmbus channel has been closed:
+	 * refer to cpu_disable_common() -> fixup_irqs() ->
+	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+	 *
+	 * Suppress the error message for hibernation because the failure
+	 * during hibernation does not matter (at this time all the devices
+	 * have been frozen). Note: the correct affinity info is still updated
+	 * into the irqdata data structure in migrate_one_irq() ->
+	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
+	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
+	 * the interrupt with the correct affinity.
+	 */
+	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
+		dev_err(&hbus->hdev->device,
+			"%s() failed: %#llx", __func__, res);
+}
 #elif defined(CONFIG_ARM64)
 /*
  * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -651,14 +766,6 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
 	return irqd->parent_data->hwirq;
 }
 
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
-				       struct msi_desc *msi_desc)
-{
-	msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
-			      msi_desc->msg.address_lo;
-	msi_entry->data = msi_desc->msg.data;
-}
-
 /*
  * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
  * the bitmap.
@@ -839,6 +946,12 @@ static struct irq_domain *hv_pci_get_root_domain(void)
 {
 	return hv_msi_gic_irq_domain;
 }
+
+/*
+ * SPIs are used for interrupts of PCI devices and SPIs is managed via GICD
+ * registers which Hyper-V already supports, so no hypercall needed.
+ */
+static void hv_arch_irq_unmask(struct irq_data *data) { }
 #endif /* CONFIG_ARM64 */
 
 /**
@@ -1456,119 +1569,9 @@ static void hv_irq_mask(struct irq_data *data)
 	irq_chip_mask_parent(data);
 }
 
-/**
- * hv_irq_unmask() - "Unmask" the IRQ by setting its current
- * affinity.
- * @data: Describes the IRQ
- *
- * Build new a destination for the MSI and make a hypercall to
- * update the Interrupt Redirection Table. "Device Logical ID"
- * is built out of this PCI bus's instance GUID and the function
- * number of the device.
- */
 static void hv_irq_unmask(struct irq_data *data)
 {
-	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
-	struct hv_retarget_device_interrupt *params;
-	struct hv_pcibus_device *hbus;
-	struct cpumask *dest;
-	cpumask_var_t tmp;
-	struct pci_bus *pbus;
-	struct pci_dev *pdev;
-	unsigned long flags;
-	u32 var_size = 0;
-	int cpu, nr_bank;
-	u64 res;
-
-	dest = irq_data_get_effective_affinity_mask(data);
-	pdev = msi_desc_to_pci_dev(msi_desc);
-	pbus = pdev->bus;
-	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
-
-	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
-
-	params = &hbus->retarget_msi_interrupt_params;
-	memset(params, 0, sizeof(*params));
-	params->partition_id = HV_PARTITION_ID_SELF;
-	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
-	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
-	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
-			   (hbus->hdev->dev_instance.b[4] << 16) |
-			   (hbus->hdev->dev_instance.b[7] << 8) |
-			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
-			   PCI_FUNC(pdev->devfn);
-	params->int_target.vector = hv_msi_get_int_vector(data);
-
-	/*
-	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
-	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
-	 * spurious interrupt storm. Not doing so does not seem to have a
-	 * negative effect (yet?).
-	 */
-
-	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
-		/*
-		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
-		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
-		 * with >64 VP support.
-		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
-		 * is not sufficient for this hypercall.
- */
-		params->int_target.flags |=
-			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
-
-		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
-			res = 1;
-			goto exit_unlock;
-		}
-
-		cpumask_and(tmp, dest, cpu_online_mask);
-		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
-		free_cpumask_var(tmp);
-
-		if (nr_bank <= 0) {
-			res = 1;
-			goto exit_unlock;
-		}
-
-		/*
-		 * var-sized hypercall, var-size starts after vp_mask (thus
-		 * vp_set.format does not count, but vp_set.valid_bank_mask
-		 * does).
-		 */
-		var_size = 1 + nr_bank;
-	} else {
-		for_each_cpu_and(cpu, dest, cpu_online_mask) {
-			params->int_target.vp_mask |=
-				(1ULL << hv_cpu_number_to_vp_number(cpu));
-		}
-	}
-
-	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
-			      params, NULL);
-
-exit_unlock:
-	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
-
-	/*
-	 * During hibernation, when a CPU is offlined, the kernel tries
-	 * to move the interrupt to the remaining CPUs that haven't
-	 * been offlined yet. In this case, the below hv_do_hypercall()
-	 * always fails since the vmbus channel has been closed:
-	 * refer to cpu_disable_common() -> fixup_irqs() ->
-	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
-	 *
-	 * Suppress the error message for hibernation because the failure
-	 * during hibernation does not matter (at this time all the devices
-	 * have been frozen). Note: the correct affinity info is still updated
-	 * into the irqdata data structure in migrate_one_irq() ->
-	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
-	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
-	 * the interrupt with the correct affinity.
-	 */
-	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
-		dev_err(&hbus->hdev->device,
-			"%s() failed: %#llx", __func__, res);
+	hv_arch_irq_unmask(data);
 
 	if (data->parent_data->chip->irq_unmask)
 		irq_chip_unmask_parent(data);
@@ -2155,8 +2158,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
 		if (!hv_dev)
 			continue;
 
-		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
-			set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
+			/*
+			 * The kernel may boot with some NUMA nodes offline
+			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
+			 * "numa=off". In those cases, adjust the host provided
+			 * NUMA node to a valid NUMA node used by the kernel.
+ */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 48169b1e3817..50a8e1d6f70a 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -35,7 +35,7 @@ struct loongson_pci { /* Fixup wrong class code in PCIe bridges */ static void bridge_class_quirk(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_0, bridge_class_quirk); diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 71258ea3d35f..8f76d4bda356 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -32,8 +32,9 @@ #define PCIE_DEV_REV_OFF 0x0008 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) +#define PCIE_SSDEV_ID_OFF 0x002c #define PCIE_CAP_PCIEXP 0x0060 -#define PCIE_HEADER_LOG_4_OFF 0x0128 +#define PCIE_CAP_PCIERR_OFF 0x0100 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) @@ -53,9 +54,10 @@ PCIE_CONF_ADDR_EN) #define PCIE_CONF_DATA_OFF 0x18fc #define PCIE_INT_CAUSE_OFF 0x1900 +#define PCIE_INT_UNMASK_OFF 0x1910 +#define PCIE_INT_INTX(i) BIT(24+i) #define PCIE_INT_PM_PME BIT(28) -#define PCIE_MASK_OFF 0x1910 -#define PCIE_MASK_ENABLE_INTS 0x0f000000 +#define PCIE_INT_ALL_MASK GENMASK(31, 0) #define PCIE_CTRL_OFF 0x1a00 #define PCIE_CTRL_X1_MODE 0x0001 #define PCIE_CTRL_RC_MODE BIT(1) @@ -93,6 +95,7 @@ struct mvebu_pcie_port { void __iomem *base; u32 port; u32 lane; + bool is_x4; int devfn; unsigned int mem_target; unsigned int mem_attr; @@ -108,6 +111,9 @@ struct mvebu_pcie_port { struct mvebu_pcie_window iowin; u32 saved_pcie_stat; struct resource regs; + struct irq_domain *intx_irq_domain; + raw_spinlock_t irq_lock; + int intx_irq; }; static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) @@ -233,13 +239,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) { - u32 ctrl, cmd, dev_rev, mask; + u32 ctrl, lnkcap, cmd, dev_rev, unmask; /* Setup PCIe controller to Root Complex mode. */ ctrl = mvebu_readl(port, PCIE_CTRL_OFF); ctrl |= PCIE_CTRL_RC_MODE; mvebu_writel(port, ctrl, PCIE_CTRL_OFF); + /* + * Set Maximum Link Width to X1 or X4 in Root Port's PCIe Link + * Capability register. This register is defined by PCIe specification + * as read-only but this mvebu controller has it as read-write and must + * be set to number of SerDes PCIe lanes (1 or 4). If this register is + * not set correctly then link with endpoint card is not established. + */ + lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); + lnkcap &= ~PCI_EXP_LNKCAP_MLW; + lnkcap |= (port->is_x4 ? 4 : 1) << 4; + mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP); + /* Disable Root Bridge I/O space, memory space and bus mastering. 
*/ cmd = mvebu_readl(port, PCIE_CMD_OFF); cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); @@ -268,23 +286,57 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) */ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); dev_rev &= ~0xffffff00; - dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; + dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF); /* Point PCIe unit MBUS decode windows to DRAM space. */ mvebu_pcie_setup_wins(port); - /* Enable interrupt lines A-D. */ - mask = mvebu_readl(port, PCIE_MASK_OFF); - mask |= PCIE_MASK_ENABLE_INTS; - mvebu_writel(port, mask, PCIE_MASK_OFF); + /* Mask all interrupt sources. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); + + /* Clear all interrupt causes. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); + + /* Check if "intx" interrupt was specified in DT. */ + if (port->intx_irq > 0) + return; + + /* + * Fallback code when "intx" interrupt was not specified in DT: + * Unmask all legacy INTx interrupts as driver does not provide a way + * for masking and unmasking of individual legacy INTx interrupts. + * Legacy INTx are reported via one shared GIC source and therefore + * kernel cannot distinguish which individual legacy INTx was triggered. + * These interrupts are shared, so it should not cause any issue. Just + * performance penalty as every PCIe interrupt handler needs to be + * called when some interrupt is triggered. + */ + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) | + PCIE_INT_INTX(2) | PCIE_INT_INTX(3); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); } -static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) +static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, + struct pci_bus *bus, + int devfn); + +static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) { - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + struct mvebu_pcie *pcie = bus->sysdata; + struct mvebu_pcie_port *port; + void __iomem *conf_data; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!mvebu_pcie_link_up(port)) + return PCIBIOS_DEVICE_NOT_FOUND; + + conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); @@ -300,18 +352,27 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, *val = readl_relaxed(conf_data); break; default: - *val = 0xffffffff; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } -static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, - struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) +static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) { - void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; + struct mvebu_pcie *pcie = bus->sysdata; + struct mvebu_pcie_port *port; + void __iomem *conf_data; + + port = mvebu_pcie_find_port(pcie, bus, devfn); + if (!port) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!mvebu_pcie_link_up(port)) + return PCIBIOS_DEVICE_NOT_FOUND; + + conf_data = port->base + PCIE_CONF_DATA_OFF; mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), PCIE_CONF_ADDR_OFF); @@ -333,6 +394,11 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, return PCIBIOS_SUCCESSFUL; } +static struct pci_ops 
mvebu_pcie_child_ops = { + .read = mvebu_pcie_child_rd_conf, + .write = mvebu_pcie_child_wr_conf, +}; + /* * Remove windows, starting from the largest ones to the smallest * ones. @@ -438,12 +504,6 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); - if (!mvebu_has_ioport(port)) { - dev_WARN(&port->pcie->pdev->dev, - "Attempt to set IO when IO is disabled\n"); - return -EOPNOTSUPP; - } - /* * We read the PCI-to-PCI bridge emulated registers, and * calculate the base address and size of the address decoding @@ -552,15 +612,20 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_LNKCAP: /* - * PCIe requires the clock power management capability to be - * hard-wired to zero for downstream ports + * PCIe requires that the Clock Power Management capability bit + * is hard-wired to zero for downstream ports but HW returns 1. + * Additionally enable Data Link Layer Link Active Reporting + * Capable bit as DL_Active indication is provided too. */ - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & - ~PCI_EXP_LNKCAP_CLKPM; + *value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & + ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC; break; case PCI_EXP_LNKCTL: - *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + /* DL_Active indication is provided via PCIE_STAT_OFF */ + *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) | + (mvebu_pcie_link_up(port) ? + (PCI_EXP_LNKSTA_DLLLA << 16) : 0); break; case PCI_EXP_SLTCTL: @@ -590,6 +655,37 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } +static pci_bridge_emul_read_status_t +mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + case 0: + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_STATUS: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG+0: + case PCI_ERR_HEADER_LOG+4: + case PCI_ERR_HEADER_LOG+8: + case PCI_ERR_HEADER_LOG+12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_STATUS: + case PCI_ERR_ROOT_ERR_SRC: + *value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg); + break; + + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } + + return PCI_BRIDGE_EMUL_HANDLED; +} + static void mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask) @@ -599,24 +695,18 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, switch (reg) { case PCI_COMMAND: - if (!mvebu_has_ioport(port)) { - conf->command = cpu_to_le16( - le16_to_cpu(conf->command) & ~PCI_COMMAND_IO); - new &= ~PCI_COMMAND_IO; - } - mvebu_writel(port, new, PCIE_CMD_OFF); break; case PCI_IO_BASE: - if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) { + if ((mask & 0xffff) && mvebu_has_ioport(port) && + mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; + conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); - if (mvebu_has_ioport(port)) - conf->iobase |= 0xf0; } break; @@ -630,14 +720,14 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, break; case PCI_IO_BASE_UPPER16: - if (mvebu_pcie_handle_iobase_change(port)) { + if (mvebu_has_ioport(port) && + 
mvebu_pcie_handle_iobase_change(port)) { /* On error disable IO range */ conf->iobase &= ~0xf0; conf->iolimit &= ~0xf0; + conf->iobase |= 0xf0; conf->iobaseupper = cpu_to_le16(0x0000); conf->iolimitupper = cpu_to_le16(0x0000); - if (mvebu_has_ioport(port)) - conf->iobase |= 0xf0; } break; @@ -675,10 +765,9 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, case PCI_EXP_LNKCTL: /* - * If we don't support CLKREQ, we must ensure that the - * CLKREQ enable bit always reads zero. Since we haven't - * had this capability, and it's dependent on board wiring, - * disable it for the time being. + * PCIe requires that the Enable Clock Power Management bit + * is hard-wired to zero for downstream ports but HW allows + * to change it. */ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN; @@ -709,11 +798,45 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, } } -static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { +static void +mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + /* These are W1C registers, so clear other bits */ + case PCI_ERR_UNCOR_STATUS: + case PCI_ERR_COR_STATUS: + case PCI_ERR_ROOT_STATUS: + new &= mask; + fallthrough; + + case PCI_ERR_UNCOR_MASK: + case PCI_ERR_UNCOR_SEVER: + case PCI_ERR_COR_MASK: + case PCI_ERR_CAP: + case PCI_ERR_HEADER_LOG+0: + case PCI_ERR_HEADER_LOG+4: + case PCI_ERR_HEADER_LOG+8: + case PCI_ERR_HEADER_LOG+12: + case PCI_ERR_ROOT_COMMAND: + case PCI_ERR_ROOT_ERR_SRC: + mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg); + break; + + default: + break; + } +} + +static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { .read_base = mvebu_pci_bridge_emul_base_conf_read, .write_base = mvebu_pci_bridge_emul_base_conf_write, .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, + .read_ext = mvebu_pci_bridge_emul_ext_conf_read, + .write_ext = mvebu_pci_bridge_emul_ext_conf_write, }; /* @@ -722,19 +845,24 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { */ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { + unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD; struct pci_bridge_emul *bridge = &port->bridge; + u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF); + u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF); + u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF); u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); - bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; - bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; - bridge->conf.class_revision = - mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; + bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff); + bridge->conf.device = cpu_to_le16(dev_id >> 16); + bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff); if (mvebu_has_ioport(port)) { /* We support 32 bits I/O addressing */ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; + } else { + bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD; } /* @@ -743,11 +871,13 @@ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) */ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver); + bridge->subsystem_vendor_id = ssdev_id & 0xffff; + bridge->subsystem_id = ssdev_id >> 16; bridge->has_pcie = true; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; - return 
pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); + return pci_bridge_emul_init(bridge, bridge_flags); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) @@ -784,25 +914,12 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; - int ret; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return pci_bridge_emul_conf_write(&port->bridge, where, - size, val); - - if (!mvebu_pcie_link_up(port)) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, - where, size, val); - - return ret; + return pci_bridge_emul_conf_write(&port->bridge, where, size, val); } /* PCI configuration space read function */ @@ -811,25 +928,12 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, { struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; - int ret; port = mvebu_pcie_find_port(pcie, bus, devfn); if (!port) return PCIBIOS_DEVICE_NOT_FOUND; - /* Access the emulated PCI-to-PCI bridge */ - if (bus->number == 0) - return pci_bridge_emul_conf_read(&port->bridge, where, - size, val); - - if (!mvebu_pcie_link_up(port)) - return PCIBIOS_DEVICE_NOT_FOUND; - - /* Access the real PCIe interface */ - ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, - where, size, val); - - return ret; + return pci_bridge_emul_conf_read(&port->bridge, where, size, val); } static struct pci_ops mvebu_pcie_ops = { @@ -837,6 +941,108 @@ static struct pci_ops mvebu_pcie_ops = { .write = mvebu_pcie_wr_conf, }; +static void mvebu_pcie_intx_irq_mask(struct irq_data *d) +{ + struct mvebu_pcie_port *port = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 unmask; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask &= ~PCIE_INT_INTX(hwirq); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static void mvebu_pcie_intx_irq_unmask(struct irq_data *d) +{ + struct mvebu_pcie_port *port = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long flags; + u32 unmask; + + raw_spin_lock_irqsave(&port->irq_lock, flags); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + unmask |= PCIE_INT_INTX(hwirq); + mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF); + raw_spin_unlock_irqrestore(&port->irq_lock, flags); +} + +static struct irq_chip intx_irq_chip = { + .name = "mvebu-INTx", + .irq_mask = mvebu_pcie_intx_irq_mask, + .irq_unmask = mvebu_pcie_intx_irq_unmask, +}; + +static int mvebu_pcie_intx_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct mvebu_pcie_port *port = h->host_data; + + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq); + irq_set_chip_data(virq, port); + + return 0; +} + +static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = { + .map = mvebu_pcie_intx_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port) +{ + struct device *dev = &port->pcie->pdev->dev; + struct device_node *pcie_intc_node; + + raw_spin_lock_init(&port->irq_lock); + + pcie_intc_node = of_get_next_child(port->dn, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found for 
%s\n", port->name); + return -ENODEV; + } + + port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &mvebu_pcie_intx_irq_domain_ops, + port); + of_node_put(pcie_intc_node); + if (!port->intx_irq_domain) { + dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name); + return -ENOMEM; + } + + return 0; +} + +static void mvebu_pcie_irq_handler(struct irq_desc *desc) +{ + struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct device *dev = &port->pcie->pdev->dev; + u32 cause, unmask, status; + int i; + + chained_irq_enter(chip, desc); + + cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF); + unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF); + status = cause & unmask; + + /* Process legacy INTx interrupts */ + for (i = 0; i < PCI_NUM_INTX; i++) { + if (!(status & PCIE_INT_INTX(i))) + continue; + + if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL) + dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A'); + } + + chained_irq_exit(chip, desc); +} + static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* Interrupt support on mvebu emulated bridges is not implemented yet */ @@ -986,6 +1192,7 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, struct device *dev = &pcie->pdev->dev; enum of_gpio_flags flags; int reset_gpio, ret; + u32 num_lanes; port->pcie = pcie; @@ -998,6 +1205,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) port->lane = 0; + if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4) + port->is_x4 = true; + port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, port->lane); if (!port->name) { @@ -1030,6 +1240,21 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, port->io_attr = -1; } + /* + * Old DT bindings do not contain "intx" interrupt + * so do not fail probing driver when interrupt does not exist. + */ + port->intx_irq = of_irq_get_byname(child, "intx"); + if (port->intx_irq == -EPROBE_DEFER) { + ret = port->intx_irq; + goto err; + } + if (port->intx_irq <= 0) { + dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, " + "%pOF does not contain intx interrupt\n", + port->name, child); + } + reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); if (reset_gpio == -EPROBE_DEFER) { ret = reset_gpio; @@ -1226,6 +1451,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; + int irq = port->intx_irq; child = port->dn; if (!child) @@ -1253,6 +1479,22 @@ static int mvebu_pcie_probe(struct platform_device *pdev) continue; } + if (irq > 0) { + ret = mvebu_pcie_init_irq_domain(port); + if (ret) { + dev_err(dev, "%s: cannot init irq domain\n", + port->name); + pci_bridge_emul_cleanup(&port->bridge); + devm_iounmap(dev, port->base); + port->base = NULL; + mvebu_pcie_powerdown(port); + continue; + } + irq_set_chained_handler_and_data(irq, + mvebu_pcie_irq_handler, + port); + } + /* * PCIe topology exported by mvebu hw is quite complicated. In * reality has something like N fully independent host bridges @@ -1329,13 +1571,13 @@ static int mvebu_pcie_probe(struct platform_device *pdev) * indirectly via kernel emulated PCI bridge driver. 
*/ mvebu_pcie_setup_hw(port); - mvebu_pcie_set_local_dev_nr(port, 0); + mvebu_pcie_set_local_dev_nr(port, 1); + mvebu_pcie_set_local_bus_nr(port, 0); } - pcie->nports = i; - bridge->sysdata = pcie; bridge->ops = &mvebu_pcie_ops; + bridge->child_ops = &mvebu_pcie_child_ops; bridge->align_resource = mvebu_pcie_align_resource; bridge->map_irq = mvebu_pcie_map_irq; @@ -1357,6 +1599,7 @@ static int mvebu_pcie_remove(struct platform_device *pdev) for (i = 0; i < pcie->nports; i++) { struct mvebu_pcie_port *port = &pcie->ports[i]; + int irq = port->intx_irq; if (!port->base) continue; @@ -1367,7 +1610,17 @@ static int mvebu_pcie_remove(struct platform_device *pdev) mvebu_writel(port, cmd, PCIE_CMD_OFF); /* Mask all interrupt sources. */ - mvebu_writel(port, 0, PCIE_MASK_OFF); + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF); + + /* Clear all interrupt causes. */ + mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF); + + if (irq > 0) + irq_set_chained_handler_and_data(irq, NULL, NULL); + + /* Remove IRQ domains. */ + if (port->intx_irq_domain) + irq_domain_remove(port->intx_irq_domain); /* Free config space for emulated root bridge. */ pci_bridge_emul_cleanup(&port->bridge); diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index cb0aa65d6934..0457ec02ab70 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -726,7 +726,7 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port) /* Tegra PCIE root complex wrongly reports device class */ static void tegra_pcie_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 0d5acbfc7143..eb6240958bb0 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -49,7 +49,6 @@ #define EN_REG 0x00000001 #define OB_LO_IO 0x00000002 #define XGENE_PCIE_DEVICEID 0xE004 -#define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) #define XGENE_V1_PCI_EXP_CAP 0x40 @@ -465,7 +464,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } @@ -479,28 +478,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) } static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, - struct resource_entry *entry, - u8 *ib_reg_mask) + struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void __iomem *bar_addr; u32 pim_reg; - u64 cpu_addr = entry->res->start; - u64 pci_addr = cpu_addr - entry->offset; - u64 size = resource_size(entry->res); + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; - region = xgene_pcie_select_ib_reg(ib_reg_mask, size); + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } - if (entry->res->flags & IORESOURCE_PREFETCH) + if (range->flags & IORESOURCE_PREFETCH) flags |= 
PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); @@ -531,13 +529,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port, static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); - struct resource_entry *entry; + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; u8 ib_reg_mask = 0; - resource_list_for_each_entry(entry, &bridge->dma_ranges) - xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask); + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } return 0; } diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 854d95163112..a2c3c207a04b 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, if (hwirq < 0) return -ENOSPC; - fwspec.param[1] += hwirq; + fwspec.param[fwspec.param_count - 2] += hwirq; ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec); if (ret) diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c index 54b6e6d5bc64..99a99900444d 100644 --- a/drivers/pci/controller/pcie-iproc-bcma.c +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -18,7 +18,7 @@ /* NS: CLASS field is R/O, and set to wrong 0x200 value */ static void bcma_pcie2_fixup_class(struct pci_dev *dev) { - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index b3e75bc61ff1..2519201b0e51 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -789,14 +789,13 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie) return -EFAULT; } - /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ + /* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */ #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c -#define PCI_CLASS_BRIDGE_MASK 0xffff00 -#define PCI_CLASS_BRIDGE_SHIFT 8 +#define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, &class); - class &= ~PCI_CLASS_BRIDGE_MASK; - class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); + class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; + class |= PCI_CLASS_BRIDGE_PCI_NORMAL; iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 4, class); @@ -1581,7 +1580,7 @@ static void quirk_paxc_bridge(struct pci_dev *pdev) * code that the bridge is not an Ethernet device. */ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - pdev->class = PCI_CLASS_BRIDGE_PCI << 8; + pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; /* * MPSS is not being set properly (as it is currently 0). 
This is diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 7705d61fba4c..3e8d70bfabc6 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -292,7 +292,7 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) /* Set class code */ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); val &= ~GENMASK(31, 8); - val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8); + val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL); writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); /* Mask all INTx interrupts */ diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c index 3824862ea144..33eb37a2225c 100644 --- a/drivers/pci/controller/pcie-mt7621.c +++ b/drivers/pci/controller/pcie-mt7621.c @@ -109,15 +109,6 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg) writel_relaxed(val, pcie->base + reg); } -static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set) -{ - u32 val = readl_relaxed(pcie->base + reg); - - val &= ~clr; - val |= set; - writel_relaxed(val, pcie->base + reg); -} - static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg) { return readl_relaxed(port->base + reg); @@ -557,7 +548,7 @@ static struct platform_driver mt7621_pcie_driver = { .remove = mt7621_pcie_remove, .driver = { .name = "mt7621-pci", - .of_match_table = of_match_ptr(mt7621_pcie_ids), + .of_match_table = mt7621_pcie_ids, }, }; builtin_platform_driver(mt7621_pcie_driver); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 38b6e02edfa9..997c4df6a1e7 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -65,6 +65,42 @@ struct rcar_pcie_host { int (*phy_init_fn)(struct rcar_pcie_host *host); }; +static DEFINE_SPINLOCK(pmsr_lock); + +static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base) +{ + unsigned long flags; + u32 pmsr, val; + int ret = 0; + + spin_lock_irqsave(&pmsr_lock, flags); + + if (!pcie_base || pm_runtime_suspended(pcie_dev)) { + ret = -EINVAL; + goto unlock_exit; + } + + pmsr = readl(pcie_base + PMSR); + + /* + * Test if the PCIe controller received PM_ENTER_L1 DLLP and + * the PCIe controller is not in L1 link state. If true, apply + * fix, which will put the controller into L1 link state, from + * which it can return to L0s/L0 on its own. 
+ */ + if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) { + writel(L1IATN, pcie_base + PMCTLR); + ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, + val & L1FAEG, 10, 1000); + WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); + writel(L1FAEG | PMEL1RX, pcie_base + PMSR); + } + +unlock_exit: + spin_unlock_irqrestore(&pmsr_lock, flags); + return ret; +} + static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi) { return container_of(msi, struct rcar_pcie_host, msi); @@ -78,6 +114,54 @@ static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) return val >> shift; } +#ifdef CONFIG_ARM +#define __rcar_pci_rw_reg_workaround(instr) \ + " .arch armv7-a\n" \ + "1: " instr " %1, [%2]\n" \ + "2: isb\n" \ + "3: .pushsection .text.fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: mov %0, #" __stringify(PCIBIOS_SET_FAILED) "\n" \ + " b 3b\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .popsection\n" +#endif + +static int rcar_pci_write_reg_workaround(struct rcar_pcie *pcie, u32 val, + unsigned int reg) +{ + int error = PCIBIOS_SUCCESSFUL; +#ifdef CONFIG_ARM + asm volatile( + __rcar_pci_rw_reg_workaround("str") + : "+r"(error):"r"(val), "r"(pcie->base + reg) : "memory"); +#else + rcar_pci_write_reg(pcie, val, reg); +#endif + return error; +} + +static int rcar_pci_read_reg_workaround(struct rcar_pcie *pcie, u32 *val, + unsigned int reg) +{ + int error = PCIBIOS_SUCCESSFUL; +#ifdef CONFIG_ARM + asm volatile( + __rcar_pci_rw_reg_workaround("ldr") + : "+r"(error), "=r"(*val) : "r"(pcie->base + reg) : "memory"); + + if (error != PCIBIOS_SUCCESSFUL) + PCI_SET_ERROR_RESPONSE(val); +#else + *val = rcar_pci_read_reg(pcie, reg); +#endif + return error; +} + /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static int rcar_pcie_config_access(struct rcar_pcie_host *host, unsigned char access_type, struct pci_bus *bus, @@ -85,6 +169,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, { struct rcar_pcie *pcie = &host->pcie; unsigned int dev, func, reg, index; + int ret; + + /* Wake the bus up in case it is in L1 state. */ + ret = rcar_pcie_wakeup(pcie->dev, pcie->base); + if (ret) { + PCI_SET_ERROR_RESPONSE(data); + return PCIBIOS_SET_FAILED; + } dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); @@ -141,14 +233,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) - *data = rcar_pci_read_reg(pcie, PCIECDR); + ret = rcar_pci_read_reg_workaround(pcie, data, PCIECDR); else - rcar_pci_write_reg(pcie, *data, PCIECDR); + ret = rcar_pci_write_reg_workaround(pcie, *data, PCIECDR); /* Disable the configuration access */ rcar_pci_write_reg(pcie, 0, PCIECCTLR); - return PCIBIOS_SUCCESSFUL; + return ret; } static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, @@ -370,7 +462,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie) * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. 
*/ - rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, IDSETR1); /* * Setup Secondary Bus Number & Subordinate Bus Number, even though @@ -1050,40 +1142,10 @@ static struct platform_driver rcar_pcie_driver = { }; #ifdef CONFIG_ARM -static DEFINE_SPINLOCK(pmsr_lock); static int rcar_pcie_aarch32_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - unsigned long flags; - u32 pmsr, val; - int ret = 0; - - spin_lock_irqsave(&pmsr_lock, flags); - - if (!pcie_base || pm_runtime_suspended(pcie_dev)) { - ret = 1; - goto unlock_exit; - } - - pmsr = readl(pcie_base + PMSR); - - /* - * Test if the PCIe controller received PM_ENTER_L1 DLLP and - * the PCIe controller is not in L1 link state. If true, apply - * fix, which will put the controller into L1 link state, from - * which it can return to L0s/L0 on its own. - */ - if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) { - writel(L1IATN, pcie_base + PMCTLR); - ret = readl_poll_timeout_atomic(pcie_base + PMSR, val, - val & L1FAEG, 10, 1000); - WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret); - writel(L1FAEG | PMEL1RX, pcie_base + PMSR); - } - -unlock_exit: - spin_unlock_irqrestore(&pmsr_lock, flags); - return ret; + return !fixup_exception(regs); } static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = { diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 45a28880f322..7f56f99b4116 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -370,7 +370,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, PCIE_CORE_CONFIG_VENDOR); rockchip_pcie_write(rockchip, - PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, + PCI_CLASS_BRIDGE_PCI_NORMAL << 8, PCIE_RC_CONFIG_RID_CCR); /* Clear THP cap's next cap pointer to remove L1 substate cap */ diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 1650a5087450..32c3a859c26b 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -134,7 +134,6 @@ #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_SCC_SHIFT 16 #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index cc166c683638..eb05cceab964 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -99,11 +99,13 @@ struct vmd_irq { * @srcu: SRCU struct for local synchronization. * @count: number of child IRQs assigned to this vector; used to track * sharing. 
+ * @virq:		The underlying VMD Linux interrupt number
 */
 struct vmd_irq_list {
 	struct list_head	irq_list;
 	struct srcu_struct	srcu;
 	unsigned int		count;
+	unsigned int		virq;
 };
 
 struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
 	struct msi_desc *desc = arg->desc;
 	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
 	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
-	unsigned int index, vector;
 
 	if (!vmdirq)
 		return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
 	INIT_LIST_HEAD(&vmdirq->node);
 	vmdirq->irq = vmd_next_irq(vmd, desc);
 	vmdirq->virq = virq;
-	index = index_from_irqs(vmd, vmdirq->irq);
-	vector = pci_irq_vector(vmd->dev, index);
 
-	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+	irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
 			    handle_untracked_irq, vmd, NULL);
 	return 0;
 }
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
 			return err;
 
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+		vmd->irqs[i].virq = pci_irq_vector(dev, i);
+		err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
 				       vmd_irq, IRQF_NO_THREAD,
 				       vmd->name, &vmd->irqs[i]);
 		if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
 	int i;
 
 	for (i = 0; i < vmd->msix_count; i++)
-		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+		devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
 
 	return 0;
 }
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
 	int err, i;
 
 	for (i = 0; i < vmd->msix_count; i++) {
-		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+		err = devm_request_irq(dev, vmd->irqs[i].virq,
 				       vmd_irq, IRQF_NO_THREAD,
 				       vmd->name, &vmd->irqs[i]);
 		if (err)
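
A few standalone sketches of the bit-level encodings used in the hunks above. All of them are illustrative userspace C, not kernel code; every helper name in them is hypothetical.

The hv_arch_irq_unmask() hunk builds the "Device Logical ID" by packing bytes 4-7 of the VMBus instance GUID together with the PCI function number. A minimal sketch of that packing, with made-up sample GUID bytes:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper mirroring the device_id packing above:
 * bits 31:24 = GUID byte 5, bits 23:16 = GUID byte 4, bits 15:8 = GUID
 * byte 7, bits 7:3 = the high bits of GUID byte 6, bits 2:0 = function.
 */
static uint32_t hv_compose_device_id(const uint8_t instance_guid[16],
				     uint8_t devfn)
{
	return ((uint32_t)instance_guid[5] << 24) |
	       ((uint32_t)instance_guid[4] << 16) |
	       ((uint32_t)instance_guid[7] << 8)  |
	       (uint32_t)(instance_guid[6] & 0xf8) |
	       (uint32_t)(devfn & 0x07);	/* PCI_FUNC(devfn) */
}

int main(void)
{
	uint8_t guid[16] = { 0 };

	/* sample values only */
	guid[4] = 0x12; guid[5] = 0x34; guid[6] = 0x5f; guid[7] = 0x78;
	printf("device_id = %#010x\n",
	       (unsigned)hv_compose_device_id(guid, 5));
	return 0;
}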
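
The same hunk issues a variable-sized hypercall, folding the variable header size into the control word via (var_size << 17), where var_size = 1 + nr_bank because vp_set.valid_bank_mask counts toward the variable header but vp_set.format does not. A sketch of that control-word construction; the call code 0x7e is an assumption taken from the Hyper-V TLFS headers:

#include <stdint.h>
#include <stdio.h>

#define HVCALL_RETARGET_INTERRUPT	0x7e	/* assumed TLFS call code */
#define HV_HYPERCALL_VARHEAD_SHIFT	17	/* from (var_size << 17) above */

/*
 * Control word for a VP_SET retarget: one 64-bit word for
 * valid_bank_mask plus nr_bank words of bank masks follow the
 * fixed-size hypercall input header.
 */
static uint64_t retarget_control_word(int nr_bank)
{
	uint64_t var_size = 1 + (uint64_t)nr_bank;

	return HVCALL_RETARGET_INTERRUPT |
	       (var_size << HV_HYPERCALL_VARHEAD_SHIFT);
}

int main(void)
{
	/* e.g. two populated VP banks -> var_size = 3 */
	printf("control = %#llx\n",
	       (unsigned long long)retarget_control_word(2));
	return 0;
}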
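
In the pci-mvebu.c changes, the per-INTx mask/unmask callbacks do a read-modify-write of the unmask register under port->irq_lock, with INTA..INTD living at bits 24..27 (PCIE_INT_INTX(i) = BIT(24 + i)). A small sketch of just the bit handling, on a plain variable with the register access and locking elided:

#include <stdint.h>
#include <stdio.h>

#define PCIE_INT_INTX(i)	(1u << (24 + (i)))	/* INTA..INTD */

/* Unmask (enable) legacy interrupt INTA+hwirq in the unmask value. */
static uint32_t intx_unmask(uint32_t unmask, unsigned int hwirq)
{
	return unmask | PCIE_INT_INTX(hwirq);
}

/* Mask (disable) legacy interrupt INTA+hwirq. */
static uint32_t intx_mask(uint32_t unmask, unsigned int hwirq)
{
	return unmask & ~PCIE_INT_INTX(hwirq);
}

int main(void)
{
	uint32_t unmask = 0;

	unmask = intx_unmask(unmask, 0);	/* INTA */
	unmask = intx_unmask(unmask, 2);	/* INTC */
	printf("unmask = %#010x\n", unmask);	/* 0x05000000 */
	unmask = intx_mask(unmask, 0);
	printf("unmask = %#010x\n", unmask);	/* 0x04000000 */
	return 0;
}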
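
Finally, the pci-xgene.c hunk computes each inbound window mask as ~(size - 1) | EN_REG, which is only meaningful when the window size is a power of two. A sketch of that mask construction, reusing the EN_REG value from the file's defines shown above:

#include <stdint.h>
#include <stdio.h>

#define EN_REG	0x00000001ULL	/* window-enable bit, per the defines above */

/*
 * Inbound window mask: keeps the address bits above the window size
 * and sets the enable bit. Assumes size is a power of two.
 */
static uint64_t ib_window_mask(uint64_t size)
{
	return ~(size - 1) | EN_REG;
}

int main(void)
{
	printf("mask = %#018llx\n",
	       (unsigned long long)ib_window_mask(1ULL << 30));	/* 1 GiB */
	return 0;
}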