121 files changed, 4009 insertions, 2727 deletions
diff --git a/Documentation/PCI/pci-iov-howto.rst b/Documentation/PCI/pci-iov-howto.rst index b9fd003206f1..27d35933cea2 100644 --- a/Documentation/PCI/pci-iov-howto.rst +++ b/Documentation/PCI/pci-iov-howto.rst @@ -125,14 +125,14 @@ Following piece of code illustrates the usage of the SR-IOV API.  		...  	} -	static int dev_suspend(struct pci_dev *dev, pm_message_t state) +	static int dev_suspend(struct device *dev)  	{  		...  		return 0;  	} -	static int dev_resume(struct pci_dev *dev) +	static int dev_resume(struct device *dev)  	{  		... @@ -165,8 +165,7 @@ Following piece of code illustrates the usage of the SR-IOV API.  		.id_table =	dev_id_table,  		.probe =	dev_probe,  		.remove =	dev_remove, -		.suspend =	dev_suspend, -		.resume =	dev_resume, +		.driver.pm =	&dev_pm_ops,  		.shutdown =	dev_shutdown,  		.sriov_configure = dev_sriov_configure,  	}; diff --git a/Documentation/PCI/sysfs-pci.rst b/Documentation/PCI/sysfs-pci.rst index 742fbd21dc1f..f495185aa88a 100644 --- a/Documentation/PCI/sysfs-pci.rst +++ b/Documentation/PCI/sysfs-pci.rst @@ -125,7 +125,7 @@ implementation of that functionality. To support the historical interface of  mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.  Alternatively, platforms which set HAVE_PCI_MMAP may provide their own -implementation of pci_mmap_page_range() instead of defining +implementation of pci_mmap_resource_range() instead of defining  ARCH_GENERIC_PCI_MMAP_RESOURCE.  Platforms which support write-combining maps of PCI resources must define diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt index 57ae73462272..684227522267 100644 --- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt +++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt @@ -7,6 +7,7 @@ Required properties:  	"mediatek,mt7622-pcie"  	"mediatek,mt7623-pcie"  	"mediatek,mt7629-pcie" +	"airoha,en7523-pcie"  - device_type: Must be "pci"  - reg: Base addresses and lengths of the root ports.  - reg-names: Names of the above areas to use during resource lookup. diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml new file mode 100644 index 000000000000..a24fb8307d29 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie-ep.yaml @@ -0,0 +1,319 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/nvidia,tegra194-pcie-ep.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVIDIA Tegra194 (and later) PCIe Endpoint controller (Synopsys DesignWare Core based) + +maintainers: +  - Thierry Reding <thierry.reding@gmail.com> +  - Jon Hunter <jonathanh@nvidia.com> +  - Vidya Sagar <vidyas@nvidia.com> + +description: | +  This PCIe controller is based on the Synopsys DesignWare PCIe IP and thus +  inherits all the common properties defined in snps,dw-pcie-ep.yaml.  Some +  of the controller instances are dual mode; they can work either in Root +  Port mode or Endpoint mode but one at a time. + +  On Tegra194, controllers C0, C4 and C5 support Endpoint mode. +  On Tegra234, controllers C5, C6, C7 and C10 support Endpoint mode. + +  Note: On Tegra194's P2972-0000 platform, only C5 controller can be enabled to +  operate in the Endpoint mode because of the way the platform is designed. 
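The pci-iov-howto.rst hunk above converts the example SR-IOV driver from the legacy .suspend/.resume hooks to a dev_pm_ops table referenced through .driver.pm. A minimal sketch of the converted shape, using the howto's own placeholder symbols (dev_id_table, dev_probe, dev_remove, dev_sriov_configure) and eliding the callback bodies just as the document does:

	#include <linux/pci.h>
	#include <linux/pm.h>

	static int dev_suspend(struct device *dev)
	{
		/* ... quiesce the PF (and its VFs) ... */
		return 0;
	}

	static int dev_resume(struct device *dev)
	{
		/* ... restore device state ... */
		return 0;
	}

	/* System-sleep callbacks only; runtime PM hooks would go here as well. */
	static SIMPLE_DEV_PM_OPS(dev_pm_ops, dev_suspend, dev_resume);

	static struct pci_driver dev_driver = {
		.name			= "SR-IOV Physical Function driver",
		.id_table		= dev_id_table,
		.probe			= dev_probe,
		.remove			= dev_remove,
		.driver.pm		= &dev_pm_ops,
		.sriov_configure	= dev_sriov_configure,
	};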
+ +properties: +  compatible: +    enum: +      - nvidia,tegra194-pcie-ep +      - nvidia,tegra234-pcie-ep + +  reg: +    items: +      - description: controller's application logic registers +      - description: iATU and DMA registers. This is where the iATU (internal +          Address Translation Unit) registers of the PCIe core are made +          available for software access. +      - description: aperture where the Root Port's own configuration +          registers are available. +      - description: aperture used to map the remote Root Complex address space + +  reg-names: +    items: +      - const: appl +      - const: atu_dma +      - const: dbi +      - const: addr_space + +  interrupts: +    items: +      - description: controller interrupt + +  interrupt-names: +    items: +      - const: intr + +  clocks: +    items: +      - description: module clock + +  clock-names: +    items: +      - const: core + +  resets: +    items: +      - description: APB bus interface reset +      - description: module reset + +  reset-names: +    items: +      - const: apb +      - const: core + +  reset-gpios: +    description: Must contain a phandle to a GPIO controller followed by GPIO +      that is being used as PERST input signal. Please refer to pci.txt. + +  phys: +    minItems: 1 +    maxItems: 8 + +  phy-names: +    minItems: 1 +    items: +      - const: p2u-0 +      - const: p2u-1 +      - const: p2u-2 +      - const: p2u-3 +      - const: p2u-4 +      - const: p2u-5 +      - const: p2u-6 +      - const: p2u-7 + +  power-domains: +    maxItems: 1 +    description: | +      A phandle to the node that controls power to the respective PCIe +      controller and a specifier name for the PCIe controller. + +      Tegra194 specifiers are defined in "include/dt-bindings/power/tegra194-powergate.h" +      Tegra234 specifiers are defined in "include/dt-bindings/power/tegra234-powergate.h" + +  interconnects: +    items: +      - description: memory read client +      - description: memory write client + +  interconnect-names: +    items: +      - const: dma-mem # read +      - const: write + +  dma-coherent: true + +  nvidia,bpmp: +    $ref: /schemas/types.yaml#/definitions/phandle-array +    description: | +      Must contain a pair of phandles to BPMP controller node followed by +      controller ID. 
Following are the controller IDs for each controller: + +      Tegra194 + +        0: C0 +        1: C1 +        2: C2 +        3: C3 +        4: C4 +        5: C5 + +      Tegra234 + +        0 : C0 +        1 : C1 +        2 : C2 +        3 : C3 +        4 : C4 +        5 : C5 +        6 : C6 +        7 : C7 +        8 : C8 +        9 : C9 +        10: C10 + +    items: +      - items: +          - description: phandle to BPMP controller node +          - description: PCIe controller ID +            maximum: 10 + +  nvidia,aspm-cmrt-us: +    description: Common Mode Restore Time for proper operation of ASPM to be +      specified in microseconds + +  nvidia,aspm-pwr-on-t-us: +    description: Power On time for proper operation of ASPM to be specified in +      microseconds + +  nvidia,aspm-l0s-entrance-latency-us: +    description: ASPM L0s entrance latency to be specified in microseconds + +  vddio-pex-ctl-supply: +    description: A phandle to the regulator supply for PCIe side band signals + +  nvidia,refclk-select-gpios: +    maxItems: 1 +    description: GPIO used to enable REFCLK to controller from the host + +  nvidia,enable-ext-refclk: +    description: | +      This boolean property needs to be present if the controller is configured +      to receive Reference Clock from the host. +      NOTE: This is applicable only for Tegra234. + +    $ref: /schemas/types.yaml#/definitions/flag + +  nvidia,enable-srns: +    description: | +      This boolean property needs to be present if the controller is +      configured to operate in SRNS (Separate Reference Clocks with No +      Spread-Spectrum Clocking).  NOTE: This is applicable only for +      Tegra234. + +    $ref: /schemas/types.yaml#/definitions/flag + +allOf: +  - $ref: /schemas/pci/snps,dw-pcie-ep.yaml# + +unevaluatedProperties: false + +required: +  - interrupts +  - interrupt-names +  - clocks +  - clock-names +  - resets +  - reset-names +  - power-domains +  - reset-gpios +  - vddio-pex-ctl-supply +  - num-lanes +  - phys +  - phy-names +  - nvidia,bpmp + +examples: +  - | +    #include <dt-bindings/clock/tegra194-clock.h> +    #include <dt-bindings/gpio/tegra194-gpio.h> +    #include <dt-bindings/interrupt-controller/arm-gic.h> +    #include <dt-bindings/power/tegra194-powergate.h> +    #include <dt-bindings/reset/tegra194-reset.h> + +    bus@0 { +        #address-cells = <2>; +        #size-cells = <2>; +        ranges = <0x0 0x0 0x0 0x8 0x0>; + +        pcie-ep@141a0000 { +            compatible = "nvidia,tegra194-pcie-ep"; +            reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K) */ +                  <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K)  */ +                  <0x00 0x3a080000 0x0 0x00040000>, /* DBI reg space (256K) */ +                  <0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G) */ +            reg-names = "appl", "atu_dma", "dbi", "addr_space"; +            interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */ +            interrupt-names = "intr"; + +            clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>; +            clock-names = "core"; + +            resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>, +                     <&bpmp TEGRA194_RESET_PEX1_CORE_5>; +            reset-names = "apb", "core"; + +            power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>; +            pinctrl-names = "default"; +            pinctrl-0 = <&clkreq_c5_bi_dir_state>; + +            nvidia,bpmp = <&bpmp 5>; + +            nvidia,aspm-cmrt-us = 
<60>; +            nvidia,aspm-pwr-on-t-us = <20>; +            nvidia,aspm-l0s-entrance-latency-us = <3>; + +            vddio-pex-ctl-supply = <&vdd_1v8ao>; + +            reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>; + +            nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5) +                                          GPIO_ACTIVE_HIGH>; + +            num-lanes = <8>; + +            phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, +                   <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>, +                   <&p2u_nvhs_6>, <&p2u_nvhs_7>; + +            phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4", +                        "p2u-5", "p2u-6", "p2u-7"; +        }; +    }; + +  - | +    #include <dt-bindings/clock/tegra234-clock.h> +    #include <dt-bindings/gpio/tegra234-gpio.h> +    #include <dt-bindings/interrupt-controller/arm-gic.h> +    #include <dt-bindings/power/tegra234-powergate.h> +    #include <dt-bindings/reset/tegra234-reset.h> + +    bus@0 { +        #address-cells = <2>; +        #size-cells = <2>; +        ranges = <0x0 0x0 0x0 0x8 0x0>; + +        pcie-ep@141a0000 { +            compatible = "nvidia,tegra234-pcie-ep"; +            power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX8A>; +            reg = <0x00 0x141a0000 0x0 0x00020000>, /* appl registers (128K)      */ +                  <0x00 0x3a040000 0x0 0x00040000>, /* iATU_DMA reg space (256K)  */ +                  <0x00 0x3a080000 0x0 0x00040000>, /* DBI reg space (256K)       */ +                  <0x27 0x40000000 0x4 0x00000000>; /* Address Space (16G)        */ +            reg-names = "appl", "atu_dma", "dbi", "addr_space"; + +            interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */ +            interrupt-names = "intr"; + +            clocks = <&bpmp TEGRA234_CLK_PEX1_C5_CORE>; +            clock-names = "core"; + +            resets = <&bpmp TEGRA234_RESET_PEX1_CORE_5_APB>, +                     <&bpmp TEGRA234_RESET_PEX1_CORE_5>; +            reset-names = "apb", "core"; + +            nvidia,bpmp = <&bpmp 5>; + +            nvidia,enable-ext-refclk; +            nvidia,aspm-cmrt-us = <60>; +            nvidia,aspm-pwr-on-t-us = <20>; +            nvidia,aspm-l0s-entrance-latency-us = <3>; + +            vddio-pex-ctl-supply = <&p3701_vdd_1v8_ls>; + +            reset-gpios = <&gpio TEGRA234_MAIN_GPIO(AF, 1) GPIO_ACTIVE_LOW>; + +            nvidia,refclk-select-gpios = <&gpio_aon +                                          TEGRA234_AON_GPIO(AA, 4) +                                          GPIO_ACTIVE_HIGH>; + +            num-lanes = <8>; + +            phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, +                   <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>, +                   <&p2u_nvhs_6>, <&p2u_nvhs_7>; + +            phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4", +                        "p2u-5", "p2u-6", "p2u-7"; +        }; +    }; diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt deleted file mode 100644 index 8e4f9bfb316d..000000000000 --- a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.txt +++ /dev/null @@ -1,245 +0,0 @@ -NVIDIA Tegra PCIe controller (Synopsys DesignWare Core based) - -This PCIe controller is based on the Synopsis Designware PCIe IP -and thus inherits all the common properties defined in snps,dw-pcie.yaml and -snps,dw-pcie-ep.yaml. 
-Some of the controller instances are dual mode where in they can work either -in root port mode or endpoint mode but one at a time. - -Required properties: -- power-domains: A phandle to the node that controls power to the respective -  PCIe controller and a specifier name for the PCIe controller. Following are -  the specifiers for the different PCIe controllers -    TEGRA194_POWER_DOMAIN_PCIEX8B: C0 -    TEGRA194_POWER_DOMAIN_PCIEX1A: C1 -    TEGRA194_POWER_DOMAIN_PCIEX1A: C2 -    TEGRA194_POWER_DOMAIN_PCIEX1A: C3 -    TEGRA194_POWER_DOMAIN_PCIEX4A: C4 -    TEGRA194_POWER_DOMAIN_PCIEX8A: C5 -  these specifiers are defined in -  "include/dt-bindings/power/tegra194-powergate.h" file. -- reg: A list of physical base address and length pairs for each set of -  controller registers. Must contain an entry for each entry in the reg-names -  property. -- reg-names: Must include the following entries: -  "appl": Controller's application logic registers -  "config": As per the definition in snps,dw-pcie.yaml -  "atu_dma": iATU and DMA registers. This is where the iATU (internal Address -             Translation Unit) registers of the PCIe core are made available -             for SW access. -  "dbi": The aperture where root port's own configuration registers are -         available -- interrupts: A list of interrupt outputs of the controller. Must contain an -  entry for each entry in the interrupt-names property. -- interrupt-names: Must include the following entries: -  "intr": The Tegra interrupt that is asserted for controller interrupts -- clocks: Must contain an entry for each entry in clock-names. -  See ../clocks/clock-bindings.txt for details. -- clock-names: Must include the following entries: -  - core -- resets: Must contain an entry for each entry in reset-names. -  See ../reset/reset.txt for details. -- reset-names: Must include the following entries: -  - apb -  - core -- phys: Must contain a phandle to P2U PHY for each entry in phy-names. -- phy-names: Must include an entry for each active lane. -  "p2u-N": where N ranges from 0 to one less than the total number of lanes -- nvidia,bpmp: Must contain a pair of phandle to BPMP controller node followed -  by controller-id. Following are the controller ids for each controller. -    0: C0 -    1: C1 -    2: C2 -    3: C3 -    4: C4 -    5: C5 -- vddio-pex-ctl-supply: Regulator supply for PCIe side band signals - -RC mode: -- compatible: Tegra19x must contain  "nvidia,tegra194-pcie" -- device_type: Must be "pci" for RC mode -- interrupt-names: Must include the following entries: -  "msi": The Tegra interrupt that is asserted when an MSI is received -- bus-range: Range of bus numbers associated with this controller -- #address-cells: Address representation for root ports (must be 3) -  - cell 0 specifies the bus and device numbers of the root port: -    [23:16]: bus number -    [15:11]: device number -  - cell 1 denotes the upper 32 address bits and should be 0 -  - cell 2 contains the lower 32 address bits and is used to translate to the -    CPU address space -- #size-cells: Size representation for root ports (must be 2) -- ranges: Describes the translation of addresses for root ports and standard -  PCI regions. The entries must be 7 cells each, where the first three cells -  correspond to the address as described for the #address-cells property -  above, the fourth and fifth cells are for the physical CPU address to -  translate to and the sixth and seventh cells are as described for the -  #size-cells property above. 
-  - Entries setup the mapping for the standard I/O, memory and -    prefetchable PCI regions. The first cell determines the type of region -    that is setup: -    - 0x81000000: I/O memory region -    - 0x82000000: non-prefetchable memory region -    - 0xc2000000: prefetchable memory region -  Please refer to the standard PCI bus binding document for a more detailed -  explanation. -- #interrupt-cells: Size representation for interrupts (must be 1) -- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties -  Please refer to the standard PCI bus binding document for a more detailed -  explanation. - -EP mode: -In Tegra194, Only controllers C0, C4 & C5 support EP mode. -- compatible: Tegra19x must contain "nvidia,tegra194-pcie-ep" -- reg-names: Must include the following entries: -  "addr_space": Used to map remote RC address space -- reset-gpios: Must contain a phandle to a GPIO controller followed by -  GPIO that is being used as PERST input signal. Please refer to pci.txt -  document. - -Optional properties: -- pinctrl-names: A list of pinctrl state names. -  It is mandatory for C5 controller and optional for other controllers. -  - "default": Configures PCIe I/O for proper operation. -- pinctrl-0: phandle for the 'default' state of pin configuration. -  It is mandatory for C5 controller and optional for other controllers. -- supports-clkreq: Refer to Documentation/devicetree/bindings/pci/pci.txt -- nvidia,update-fc-fixup: This is a boolean property and needs to be present to -    improve performance when a platform is designed in such a way that it -    satisfies at least one of the following conditions thereby enabling root -    port to exchange optimum number of FC (Flow Control) credits with -    downstream devices -    1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS) -    2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and -       a) speed is Gen-2 and MPS is 256B -       b) speed is >= Gen-3 with any MPS -- nvidia,aspm-cmrt-us: Common Mode Restore Time for proper operation of ASPM -   to be specified in microseconds -- nvidia,aspm-pwr-on-t-us: Power On time for proper operation of ASPM to be -   specified in microseconds -- nvidia,aspm-l0s-entrance-latency-us: ASPM L0s entrance latency to be -   specified in microseconds - -RC mode: -- vpcie3v3-supply: A phandle to the regulator node that supplies 3.3V to the slot -  if the platform has one such slot. (Ex:- x16 slot owned by C5 controller -  in p2972-0000 platform). -- vpcie12v-supply: A phandle to the regulator node that supplies 12V to the slot -  if the platform has one such slot. (Ex:- x16 slot owned by C5 controller -  in p2972-0000 platform). - -EP mode: -- nvidia,refclk-select-gpios: Must contain a phandle to a GPIO controller -  followed by GPIO that is being used to enable REFCLK to controller from host - -NOTE:- On Tegra194's P2972-0000 platform, only C5 controller can be enabled to -operate in the endpoint mode because of the way the platform is designed. 
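The deleted binding text above describes how the first cell of each root-port ranges entry selects the region type (0x81000000 for I/O, 0x82000000 for non-prefetchable memory, 0xc2000000 for prefetchable memory). A small standalone decoder of that cell, following the standard OF PCI address layout, makes the encoding explicit (illustrative helper, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode the high cell of a PCI 'ranges' entry (npt000ss bbbbbbbb dddddfff rrrrrrrr). */
	static void decode_pci_hi(uint32_t hi)
	{
		unsigned int space = (hi >> 24) & 0x3;	/* 1: I/O, 2: 32-bit mem, 3: 64-bit mem */
		unsigned int prefetch = (hi >> 30) & 0x1;
		unsigned int bus = (hi >> 16) & 0xff;
		unsigned int dev = (hi >> 11) & 0x1f;
		unsigned int fn = (hi >> 8) & 0x7;

		printf("%#010x: space=%u prefetchable=%u bus=%u dev=%u fn=%u\n",
		       hi, space, prefetch, bus, dev, fn);
	}

	int main(void)
	{
		decode_pci_hi(0x81000000);	/* I/O space */
		decode_pci_hi(0x82000000);	/* 32-bit non-prefetchable memory */
		decode_pci_hi(0xc2000000);	/* 32-bit prefetchable memory */
		return 0;
	}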
- -Examples: -========= - -Tegra194 RC mode: ------------------ - -	pcie@14180000 { -		compatible = "nvidia,tegra194-pcie"; -		power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; -		reg = <0x00 0x14180000 0x0 0x00020000   /* appl registers (128K)      */ -		       0x00 0x38000000 0x0 0x00040000   /* configuration space (256K) */ -		       0x00 0x38040000 0x0 0x00040000>; /* iATU_DMA reg space (256K)  */ -		reg-names = "appl", "config", "atu_dma"; - -		#address-cells = <3>; -		#size-cells = <2>; -		device_type = "pci"; -		num-lanes = <8>; -		linux,pci-domain = <0>; - -		pinctrl-names = "default"; -		pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>; - -		clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>; -		clock-names = "core"; - -		resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>, -			 <&bpmp TEGRA194_RESET_PEX0_CORE_0>; -		reset-names = "apb", "core"; - -		interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,	/* controller interrupt */ -			     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;	/* MSI interrupt */ -		interrupt-names = "intr", "msi"; - -		#interrupt-cells = <1>; -		interrupt-map-mask = <0 0 0 0>; -		interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; - -		nvidia,bpmp = <&bpmp 0>; - -		supports-clkreq; -		nvidia,aspm-cmrt-us = <60>; -		nvidia,aspm-pwr-on-t-us = <20>; -		nvidia,aspm-l0s-entrance-latency-us = <3>; - -		bus-range = <0x0 0xff>; -		ranges = <0x81000000 0x0  0x38100000 0x0  0x38100000 0x0 0x00100000    /* downstream I/O (1MB) */ -			  0x82000000 0x0  0x38200000 0x0  0x38200000 0x0 0x01E00000    /* non-prefetchable memory (30MB) */ -			  0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>;  /* prefetchable memory (16GB) */ - -		vddio-pex-ctl-supply = <&vdd_1v8ao>; -		vpcie3v3-supply = <&vdd_3v3_pcie>; -		vpcie12v-supply = <&vdd_12v_pcie>; - -		phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>, -		       <&p2u_hsio_5>; -		phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; -	}; - -Tegra194 EP mode: ------------------ - -	pcie-ep@141a0000 { -		compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep"; -		power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>; -		reg = <0x00 0x141a0000 0x0 0x00020000   /* appl registers (128K)      */ -		       0x00 0x3a040000 0x0 0x00040000   /* iATU_DMA reg space (256K)  */ -		       0x00 0x3a080000 0x0 0x00040000   /* DBI reg space (256K)       */ -		       0x1c 0x00000000 0x4 0x00000000>; /* Address Space (16G)        */ -		reg-names = "appl", "atu_dma", "dbi", "addr_space"; - -		num-lanes = <8>; -		num-ib-windows = <2>; -		num-ob-windows = <8>; - -		pinctrl-names = "default"; -		pinctrl-0 = <&clkreq_c5_bi_dir_state>; - -		clocks = <&bpmp TEGRA194_CLK_PEX1_CORE_5>; -		clock-names = "core"; - -		resets = <&bpmp TEGRA194_RESET_PEX1_CORE_5_APB>, -			 <&bpmp TEGRA194_RESET_PEX1_CORE_5>; -		reset-names = "apb", "core"; - -		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;	/* controller interrupt */ -		interrupt-names = "intr"; - -		nvidia,bpmp = <&bpmp 5>; - -		nvidia,aspm-cmrt-us = <60>; -		nvidia,aspm-pwr-on-t-us = <20>; -		nvidia,aspm-l0s-entrance-latency-us = <3>; - -		vddio-pex-ctl-supply = <&vdd_1v8ao>; - -		reset-gpios = <&gpio TEGRA194_MAIN_GPIO(GG, 1) GPIO_ACTIVE_LOW>; - -		nvidia,refclk-select-gpios = <&gpio_aon TEGRA194_AON_GPIO(AA, 5) -					      GPIO_ACTIVE_HIGH>; - -		phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, -		       <&p2u_nvhs_3>, <&p2u_nvhs_4>, <&p2u_nvhs_5>, -		       <&p2u_nvhs_6>, <&p2u_nvhs_7>; - -		phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3", "p2u-4", -			    "p2u-5", "p2u-6", "p2u-7"; -	}; diff --git 
a/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml new file mode 100644 index 000000000000..75da3e8eecb9 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/nvidia,tegra194-pcie.yaml @@ -0,0 +1,350 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/nvidia,tegra194-pcie.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVIDIA Tegra194 (and later) PCIe controller (Synopsys DesignWare Core based) + +maintainers: +  - Thierry Reding <thierry.reding@gmail.com> +  - Jon Hunter <jonathanh@nvidia.com> +  - Vidya Sagar <vidyas@nvidia.com> + +description: | +  This PCIe controller is based on the Synopsys DesignWare PCIe IP and thus +  inherits all the common properties defined in snps,dw-pcie.yaml. Some of +  the controller instances are dual mode where in they can work either in +  Root Port mode or Endpoint mode but one at a time. + +  See nvidia,tegra194-pcie-ep.yaml for details on the Endpoint mode device +  tree bindings. + +properties: +  compatible: +    enum: +      - nvidia,tegra194-pcie +      - nvidia,tegra234-pcie + +  reg: +    items: +      - description: controller's application logic registers +      - description: configuration registers +      - description: iATU and DMA registers. This is where the iATU (internal +          Address Translation Unit) registers of the PCIe core are made +          available for software access. +      - description: aperture where the Root Port's own configuration +          registers are available. + +  reg-names: +    items: +      - const: appl +      - const: config +      - const: atu_dma +      - const: dbi + +  interrupts: +    items: +      - description: controller interrupt +      - description: MSI interrupt + +  interrupt-names: +    items: +      - const: intr +      - const: msi + +  clocks: +    items: +      - description: module clock + +  clock-names: +    items: +      - const: core + +  resets: +    items: +      - description: APB bus interface reset +      - description: module reset + +  reset-names: +    items: +      - const: apb +      - const: core + +  phys: +    minItems: 1 +    maxItems: 8 + +  phy-names: +    minItems: 1 +    items: +      - const: p2u-0 +      - const: p2u-1 +      - const: p2u-2 +      - const: p2u-3 +      - const: p2u-4 +      - const: p2u-5 +      - const: p2u-6 +      - const: p2u-7 + +  power-domains: +    maxItems: 1 +    description: | +      A phandle to the node that controls power to the respective PCIe +      controller and a specifier name for the PCIe controller. + +      Tegra194 specifiers defined in "include/dt-bindings/power/tegra194-powergate.h" +      Tegra234 specifiers defined in "include/dt-bindings/power/tegra234-powergate.h" + +  interconnects: +    items: +      - description: memory read client +      - description: memory write client + +  interconnect-names: +    items: +      - const: dma-mem # read +      - const: write + +  dma-coherent: true + +  nvidia,bpmp: +    $ref: /schemas/types.yaml#/definitions/phandle-array +    description: | +      Must contain a pair of phandles to BPMP controller node followed by +      controller ID. 
Following are the controller IDs for each controller: + +      Tegra194 + +        0: C0 +        1: C1 +        2: C2 +        3: C3 +        4: C4 +        5: C5 + +      Tegra234 + +        0 : C0 +        1 : C1 +        2 : C2 +        3 : C3 +        4 : C4 +        5 : C5 +        6 : C6 +        7 : C7 +        8 : C8 +        9 : C9 +        10: C10 + +    items: +      - items: +          - description: phandle to BPMP controller node +          - description: PCIe controller ID +            maximum: 10 + +  nvidia,update-fc-fixup: +    description: | +      This is a boolean property and needs to be present to improve performance +      when a platform is designed in such a way that it satisfies at least one +      of the following conditions thereby enabling Root Port to exchange +      optimum number of FC (Flow Control) credits with downstream devices: + +      NOTE: This is applicable only for Tegra194. + +        1. If C0/C4/C5 run at x1/x2 link widths (irrespective of speed and MPS) +        2. If C0/C1/C2/C3/C4/C5 operate at their respective max link widths and +          a) speed is Gen-2 and MPS is 256B +          b) speed is >= Gen-3 with any MPS + +    $ref: /schemas/types.yaml#/definitions/flag + +  nvidia,aspm-cmrt-us: +    description: Common Mode Restore Time for proper operation of ASPM to be +      specified in microseconds + +  nvidia,aspm-pwr-on-t-us: +    description: Power On time for proper operation of ASPM to be specified in +      microseconds + +  nvidia,aspm-l0s-entrance-latency-us: +    description: ASPM L0s entrance latency to be specified in microseconds + +  vddio-pex-ctl-supply: +    description: A phandle to the regulator supply for PCIe side band signals. + +  vpcie3v3-supply: +    description: A phandle to the regulator node that supplies 3.3V to the slot +      if the platform has one such slot, e.g., x16 slot owned by C5 controller +      in p2972-0000 platform. + +  vpcie12v-supply: +    description: A phandle to the regulator node that supplies 12V to the slot +      if the platform has one such slot, e.g., x16 slot owned by C5 controller +      in p2972-0000 platform. + +  nvidia,enable-srns: +    description: | +      This boolean property needs to be present if the controller is +      configured to operate in SRNS (Separate Reference Clocks with No +      Spread-Spectrum Clocking).  NOTE: This is applicable only for +      Tegra234. + +    $ref: /schemas/types.yaml#/definitions/flag + +  nvidia,enable-ext-refclk: +    description: | +      This boolean property needs to be present if the controller is +      configured to use the reference clocking coming in from an external +      clock source instead of using the internal clock source. 
+ +    $ref: /schemas/types.yaml#/definitions/flag + +allOf: +  - $ref: /schemas/pci/snps,dw-pcie.yaml# + +unevaluatedProperties: false + +required: +  - interrupts +  - interrupt-names +  - interrupt-map +  - interrupt-map-mask +  - clocks +  - clock-names +  - resets +  - reset-names +  - power-domains +  - vddio-pex-ctl-supply +  - num-lanes +  - phys +  - phy-names +  - nvidia,bpmp + +examples: +  - | +    #include <dt-bindings/clock/tegra194-clock.h> +    #include <dt-bindings/interrupt-controller/arm-gic.h> +    #include <dt-bindings/power/tegra194-powergate.h> +    #include <dt-bindings/reset/tegra194-reset.h> + +    bus@0 { +        #address-cells = <2>; +        #size-cells = <2>; +        ranges = <0x0 0x0 0x0 0x8 0x0>; + +        pcie@14180000 { +            compatible = "nvidia,tegra194-pcie"; +            power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>; +            reg = <0x0 0x14180000 0x0 0x00020000>, /* appl registers (128K)      */ +                  <0x0 0x38000000 0x0 0x00040000>, /* configuration space (256K) */ +                  <0x0 0x38040000 0x0 0x00040000>, /* iATU_DMA reg space (256K)  */ +                  <0x0 0x38080000 0x0 0x00040000>; /* DBI reg space (256K)       */ +            reg-names = "appl", "config", "atu_dma", "dbi"; + +            #address-cells = <3>; +            #size-cells = <2>; +            device_type = "pci"; +            num-lanes = <8>; +            linux,pci-domain = <0>; + +            pinctrl-names = "default"; +            pinctrl-0 = <&pex_rst_c5_out_state>, <&clkreq_c5_bi_dir_state>; + +            clocks = <&bpmp TEGRA194_CLK_PEX0_CORE_0>; +            clock-names = "core"; + +            resets = <&bpmp TEGRA194_RESET_PEX0_CORE_0_APB>, +                     <&bpmp TEGRA194_RESET_PEX0_CORE_0>; +            reset-names = "apb", "core"; + +            interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */ +                         <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */ +            interrupt-names = "intr", "msi"; + +            #interrupt-cells = <1>; +            interrupt-map-mask = <0 0 0 0>; +            interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; + +            nvidia,bpmp = <&bpmp 0>; + +            supports-clkreq; +            nvidia,aspm-cmrt-us = <60>; +            nvidia,aspm-pwr-on-t-us = <20>; +            nvidia,aspm-l0s-entrance-latency-us = <3>; + +            bus-range = <0x0 0xff>; +            ranges = <0x81000000 0x0  0x38100000 0x0  0x38100000 0x0 0x00100000>, /* downstream I/O */ +                     <0x82000000 0x0  0x38200000 0x0  0x38200000 0x0 0x01e00000>, /* non-prefetch memory */ +                     <0xc2000000 0x18 0x00000000 0x18 0x00000000 0x4 0x00000000>; /* prefetchable memory */ + +            vddio-pex-ctl-supply = <&vdd_1v8ao>; +            vpcie3v3-supply = <&vdd_3v3_pcie>; +            vpcie12v-supply = <&vdd_12v_pcie>; + +            phys = <&p2u_hsio_2>, <&p2u_hsio_3>, <&p2u_hsio_4>, +                   <&p2u_hsio_5>; +            phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; +        }; +    }; + +  - | +    #include <dt-bindings/clock/tegra234-clock.h> +    #include <dt-bindings/interrupt-controller/arm-gic.h> +    #include <dt-bindings/power/tegra234-powergate.h> +    #include <dt-bindings/reset/tegra234-reset.h> + +    bus@0 { +        #address-cells = <2>; +        #size-cells = <2>; +        ranges = <0x0 0x0 0x0 0x8 0x0>; + +        pcie@14160000 { +            compatible = "nvidia,tegra234-pcie"; +            
power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX4BB>; +            reg = <0x00 0x14160000 0x0 0x00020000>, /* appl registers (128K)      */ +                  <0x00 0x36000000 0x0 0x00040000>, /* configuration space (256K) */ +                  <0x00 0x36040000 0x0 0x00040000>, /* iATU_DMA reg space (256K)  */ +                  <0x00 0x36080000 0x0 0x00040000>; /* DBI reg space (256K)       */ +            reg-names = "appl", "config", "atu_dma", "dbi"; + +            #address-cells = <3>; +            #size-cells = <2>; +            device_type = "pci"; +            num-lanes = <4>; +            num-viewport = <8>; +            linux,pci-domain = <4>; + +            clocks = <&bpmp TEGRA234_CLK_PEX0_C4_CORE>; +            clock-names = "core"; + +            resets = <&bpmp TEGRA234_RESET_PEX0_CORE_4_APB>, +                     <&bpmp TEGRA234_RESET_PEX0_CORE_4>; +            reset-names = "apb", "core"; + +            interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */ +                         <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */ +            interrupt-names = "intr", "msi"; + +            #interrupt-cells = <1>; +            interrupt-map-mask = <0 0 0 0>; +            interrupt-map = <0 0 0 0 &gic GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>; + +            nvidia,bpmp = <&bpmp 4>; + +            nvidia,aspm-cmrt-us = <60>; +            nvidia,aspm-pwr-on-t-us = <20>; +            nvidia,aspm-l0s-entrance-latency-us = <3>; + +            bus-range = <0x0 0xff>; +            ranges = <0x43000000 0x21 0x40000000 0x21 0x40000000 0x2 0xe8000000>, /* prefetchable */ +                     <0x02000000 0x0  0x40000000 0x24 0x28000000 0x0 0x08000000>, /* non-prefetchable */ +                     <0x01000000 0x0  0x36100000 0x00 0x36100000 0x0 0x00100000>; /* downstream I/O */ + +            vddio-pex-ctl-supply = <&p3701_vdd_AO_1v8>; + +            phys = <&p2u_hsio_4>, <&p2u_hsio_5>, <&p2u_hsio_6>, +                   <&p2u_hsio_7>; +            phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3"; +        }; +    }; diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt deleted file mode 100644 index aeba38f0a387..000000000000 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ /dev/null @@ -1,84 +0,0 @@ -Renesas AHB to PCI bridge -------------------------- - -This is the bridge used internally to connect the USB controllers to the -AHB. There is one bridge instance per USB port connected to the internal -OHCI and EHCI controllers. - -Required properties: -- compatible: "renesas,pci-r8a7742" for the R8A7742 SoC; -	      "renesas,pci-r8a7743" for the R8A7743 SoC; -	      "renesas,pci-r8a7744" for the R8A7744 SoC; -	      "renesas,pci-r8a7745" for the R8A7745 SoC; -	      "renesas,pci-r8a7790" for the R8A7790 SoC; -	      "renesas,pci-r8a7791" for the R8A7791 SoC; -	      "renesas,pci-r8a7793" for the R8A7793 SoC; -	      "renesas,pci-r8a7794" for the R8A7794 SoC; -	      "renesas,pci-rcar-gen2" for a generic R-Car Gen2 or -				      RZ/G1 compatible device. - - -	      When compatible with the generic version, nodes must list the -	      SoC-specific version corresponding to the platform first -	      followed by the generic version. - -- reg:	A list of physical regions to access the device: the first is -	the operational registers for the OHCI/EHCI controllers and the -	second is for the bridge configuration and control registers. -- interrupts: interrupt for the device. 
-- clocks: The reference to the device clock. -- bus-range: The PCI bus number range; as this is a single bus, the range -	     should be specified as the same value twice. -- #address-cells: must be 3. -- #size-cells: must be 2. -- #interrupt-cells: must be 1. -- interrupt-map: standard property used to define the mapping of the PCI -  interrupts to the GIC interrupts. -- interrupt-map-mask: standard property that helps to define the interrupt -  mapping. - -Optional properties: -- dma-ranges: a single range for the inbound memory region. If not supplied, -  defaults to 1GiB at 0x40000000. Note there are hardware restrictions on the -  allowed combinations of address and size. - -Example SoC configuration: - -	pci0: pci@ee090000  { -		compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2"; -		clocks = <&mstp7_clks R8A7790_CLK_EHCI>; -		reg = <0x0 0xee090000 0x0 0xc00>, -		      <0x0 0xee080000 0x0 0x1100>; -		interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; -		status = "disabled"; - -		bus-range = <0 0>; -		#address-cells = <3>; -		#size-cells = <2>; -		#interrupt-cells = <1>; -		dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>; -		interrupt-map-mask = <0xff00 0 0 0x7>; -		interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH -				 0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH -				 0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>; - -		usb@1,0 { -			reg = <0x800 0 0 0 0>; -			phys = <&usb0 0>; -			phy-names = "usb"; -		}; - -		usb@2,0 { -			reg = <0x1000 0 0 0 0>; -			phys = <&usb0 0>; -			phy-names = "usb"; -		}; -	}; - -Example board setup: - -&pci0 { -	status = "okay"; -	pinctrl-0 = <&usb0_pins>; -	pinctrl-names = "default"; -}; diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml index 0b69b12b849e..7d29e2a45183 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie.yaml @@ -11,7 +11,7 @@ maintainers:    - Stanimir Varbanov <svarbanov@mm-sol.com>  description: | -  Qualcomm PCIe root complex controller is bansed on the Synopsys DesignWare +  Qualcomm PCIe root complex controller is based on the Synopsys DesignWare    PCIe IP.  properties: @@ -43,11 +43,12 @@ properties:      maxItems: 5    interrupts: -    maxItems: 1 +    minItems: 1 +    maxItems: 8    interrupt-names: -    items: -      - const: msi +    minItems: 1 +    maxItems: 8    # Common definitions for clocks, clock-names and reset.    # Platform constraints are described later. 
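The qcom,pcie.yaml changes around this point widen interrupts from a single "msi" entry to as many as eight per-group vectors (named msi0..msi7 in the schema additions below). As a rough sketch of how a host driver could collect those vectors by name and fall back to the single legacy vector — the platform_get_irq_byname*() calls are real kernel APIs, but the surrounding function is illustrative rather than the Qualcomm driver's actual code:

	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	#define EXAMPLE_MAX_MSI_GROUPS	8

	/* Gather split MSI vectors by name; fall back to a single "msi" IRQ. */
	static int example_get_msi_irqs(struct platform_device *pdev, int *irqs)
	{
		char name[8];
		int i, irq, count = 0;

		for (i = 0; i < EXAMPLE_MAX_MSI_GROUPS; i++) {
			snprintf(name, sizeof(name), "msi%d", i);
			irq = platform_get_irq_byname_optional(pdev, name);
			if (irq < 0)
				break;
			irqs[count++] = irq;
		}

		if (!count) {
			irq = platform_get_irq_byname(pdev, "msi");
			if (irq < 0)
				return irq;
			irqs[count++] = irq;
		}

		return count;
	}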
@@ -614,7 +615,7 @@ allOf:    - if:        not:          properties: -          compatibles: +          compatible:              contains:                enum:                  - qcom,pcie-msm8996 @@ -623,6 +624,50 @@ allOf:          - resets          - reset-names +    # Newer chipsets support either 1 or 8 MSI vectors +    # On older chipsets it's always 1 MSI vector +  - if: +      properties: +        compatible: +          contains: +            enum: +              - qcom,pcie-msm8996 +              - qcom,pcie-sc7280 +              - qcom,pcie-sc8180x +              - qcom,pcie-sdm845 +              - qcom,pcie-sm8150 +              - qcom,pcie-sm8250 +              - qcom,pcie-sm8450-pcie0 +              - qcom,pcie-sm8450-pcie1 +    then: +      oneOf: +        - properties: +            interrupts: +              maxItems: 1 +            interrupt-names: +              items: +                - const: msi +        - properties: +            interrupts: +              minItems: 8 +            interrupt-names: +              items: +                - const: msi0 +                - const: msi1 +                - const: msi2 +                - const: msi3 +                - const: msi4 +                - const: msi5 +                - const: msi6 +                - const: msi7 +    else: +      properties: +        interrupts: +          maxItems: 1 +        interrupt-names: +          items: +            - const: msi +  unevaluatedProperties: false  examples: diff --git a/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml new file mode 100644 index 000000000000..0f18cceba3d5 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/renesas,pci-rcar-gen2.yaml @@ -0,0 +1,186 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/renesas,pci-rcar-gen2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas AHB to PCI bridge + +maintainers: +  - Marek Vasut <marek.vasut+renesas@gmail.com> +  - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> + +description: | +  This is the bridge used internally to connect the USB controllers to the +  AHB. There is one bridge instance per USB port connected to the internal +  OHCI and EHCI controllers. + +properties: +  compatible: +    oneOf: +      - items: +          - enum: +              - renesas,pci-r8a7742      # RZ/G1H +              - renesas,pci-r8a7743      # RZ/G1M +              - renesas,pci-r8a7744      # RZ/G1N +              - renesas,pci-r8a7745      # RZ/G1E +              - renesas,pci-r8a7790      # R-Car H2 +              - renesas,pci-r8a7791      # R-Car M2-W +              - renesas,pci-r8a7793      # R-Car M2-N +              - renesas,pci-r8a7794      # R-Car E2 +          - const: renesas,pci-rcar-gen2 # R-Car Gen2 and RZ/G1 +      - items: +          - enum: +              - renesas,pci-r9a06g032     # RZ/N1D +          - const: renesas,pci-rzn1       # RZ/N1 + +  reg: +    items: +      - description: Operational registers for the OHCI/EHCI controllers. +      - description: Bridge configuration and control registers. + +  interrupts: +    maxItems: 1 + +  clocks: true + +  clock-names: true + +  resets: +    maxItems: 1 + +  power-domains: +    maxItems: 1 + +  bus-range: +    description: | +      The PCI bus number range; as this is a single bus, the range +      should be specified as the same value twice. 
+ +  dma-ranges: +    description: | +      A single range for the inbound memory region. If not supplied, +      defaults to 1GiB at 0x40000000. Note there are hardware restrictions on +      the allowed combinations of address and size. +    maxItems: 1 + +patternProperties: +  'usb@[0-1],0': +    type: object + +    description: +      This a USB controller PCI device + +    properties: +      reg: +        description: +          Identify the correct bus, device and function number in the +          form <bdf 0 0 0 0>. + +        items: +          minItems: 5 +          maxItems: 5 + +      phys: +        description: +          Reference to the USB phy +        maxItems: 1 + +      phy-names: +        maxItems: 1 + +    required: +      - reg +      - phys +      - phy-names + +    unevaluatedProperties: false + +required: +  - compatible +  - reg +  - interrupts +  - interrupt-map +  - interrupt-map-mask +  - clocks +  - power-domains +  - bus-range +  - "#address-cells" +  - "#size-cells" +  - "#interrupt-cells" + +allOf: +  - $ref: /schemas/pci/pci-bus.yaml# + +  - if: +      properties: +        compatible: +          contains: +            enum: +              - renesas,pci-rzn1 +    then: +      properties: +        clocks: +          items: +            - description: Internal bus clock (AHB) for HOST +            - description: Internal bus clock (AHB) Power Management +            - description: PCI clock for USB subsystem +        clock-names: +          items: +            - const: hclkh +            - const: hclkpm +            - const: pciclk +      required: +        - clock-names +    else: +      properties: +        clocks: +          items: +            - description: Device clock +        clock-names: +          items: +            - const: pclk +      required: +        - resets + +unevaluatedProperties: false + +examples: +  - | +    #include <dt-bindings/interrupt-controller/arm-gic.h> +    #include <dt-bindings/clock/r8a7790-cpg-mssr.h> +    #include <dt-bindings/power/r8a7790-sysc.h> + +    pci@ee090000  { +        compatible = "renesas,pci-r8a7790", "renesas,pci-rcar-gen2"; +        device_type = "pci"; +        reg = <0xee090000 0xc00>, +              <0xee080000 0x1100>; +        clocks = <&cpg CPG_MOD 703>; +        power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; +        resets = <&cpg 703>; +        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; + +        bus-range = <0 0>; +        #address-cells = <3>; +        #size-cells = <2>; +        #interrupt-cells = <1>; +        ranges = <0x02000000 0 0xee080000 0xee080000 0 0x00010000>; +        dma-ranges = <0x42000000 0 0x40000000 0x40000000 0 0x40000000>; +        interrupt-map-mask = <0xf800 0 0 0x7>; +        interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, +                        <0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, +                        <0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; + +        usb@1,0 { +            reg = <0x800 0 0 0 0>; +            phys = <&usb0 0>; +            phy-names = "usb"; +        }; + +        usb@2,0 { +            reg = <0x1000 0 0 0 0>; +            phys = <&usb0 0>; +            phy-names = "usb"; +        }; +    }; diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml index c90e5e2d25f6..7287d395e1b6 100644 --- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml @@ -34,8 +34,8 @@ 
properties:      minItems: 2      maxItems: 5      items: -      enum: [ dbi, dbi2, config, atu, app, elbi, mgmt, ctrl, parf, cfg, link, -              ulreg, smu, mpu, apb, phy ] +      enum: [ dbi, dbi2, config, atu, atu_dma, app, appl, elbi, mgmt, ctrl, +              parf, cfg, link, ulreg, smu, mpu, apb, phy ]    num-lanes:      description: | diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml index cca395317a4c..24ddc2855b94 100644 --- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml +++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml @@ -14,17 +14,23 @@ allOf:  properties:    compatible: -    const: xlnx,versal-cpm-host-1.00 +    enum: +      - xlnx,versal-cpm-host-1.00 +      - xlnx,versal-cpm5-host    reg:      items:        - description: CPM system level control and status registers.        - description: Configuration space region and bridge registers. +      - description: CPM5 control and status registers. +    minItems: 2    reg-names:      items:        - const: cpm_slcr        - const: cfg +      - const: cpm_csr +    minItems: 2    interrupts:      maxItems: 1 @@ -95,4 +101,34 @@ examples:                                 interrupt-controller;                         };                 }; + +               cpm5_pcie: pcie@fcdd0000 { +                       compatible = "xlnx,versal-cpm5-host"; +                       device_type = "pci"; +                       #address-cells = <3>; +                       #interrupt-cells = <1>; +                       #size-cells = <2>; +                       interrupts = <0 72 4>; +                       interrupt-parent = <&gic>; +                       interrupt-map-mask = <0 0 0 7>; +                       interrupt-map = <0 0 0 1 &pcie_intc_1 0>, +                                       <0 0 0 2 &pcie_intc_1 1>, +                                       <0 0 0 3 &pcie_intc_1 2>, +                                       <0 0 0 4 &pcie_intc_1 3>; +                       bus-range = <0x00 0xff>; +                       ranges = <0x02000000 0x0 0xe0000000 0x0 0xe0000000 0x0 0x10000000>, +                                <0x43000000 0x80 0x00000000 0x80 0x00000000 0x0 0x80000000>; +                       msi-map = <0x0 &its_gic 0x0 0x10000>; +                       reg = <0x00 0xfcdd0000 0x00 0x1000>, +                             <0x06 0x00000000 0x00 0x1000000>, +                             <0x00 0xfce20000 0x00 0x1000000>; +                       reg-names = "cpm_slcr", "cfg", "cpm_csr"; + +                       pcie_intc_1: interrupt-controller { +                               #address-cells = <0>; +                               #interrupt-cells = <1>; +                               interrupt-controller; +                       }; +               }; +      }; diff --git a/MAINTAINERS b/MAINTAINERS index 2ecc1913bbe1..5c402001e2ce 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15862,6 +15862,14 @@ L:	linux-pci@vger.kernel.org  S:	Maintained  F:	drivers/pci/controller/dwc/*spear* +PCI DRIVER FOR XILINX VERSAL CPM +M:	Bharat Kumar Gogada <bharat.kumar.gogada@amd.com> +M:	Michal Simek <michal.simek@amd.com> +L:	linux-pci@vger.kernel.org +S:	Maintained +F:	Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml +F:	drivers/pci/controller/pcie-xilinx-cpm.c +  PCMCIA SUBSYSTEM  M:	Dominik Brodowski <linux@dominikbrodowski.net>  S:	Odd Fixes diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h index 
28610ea7786d..a04d76b96089 100644 --- a/arch/alpha/include/asm/dma.h +++ b/arch/alpha/include/asm/dma.h @@ -365,13 +365,4 @@ extern void free_dma(unsigned int dmanr);	/* release it again */  #define KERNEL_HAVE_CHECK_DMA  extern int check_dma(unsigned int dmanr); -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy 	(0) -#endif - -  #endif /* _ASM_DMA_H */ diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index cf6bc1e64d66..6312656279d7 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -56,12 +56,6 @@ struct pci_controller {  /* IOMMU controls.  */ -/* TODO: integrate with include/asm-generic/pci.h ? */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 15 : 14; -} -  #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index  static inline int pci_proc_domain(struct pci_bus *bus) diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h index 5b744f4b10a7..02431027ed2f 100644 --- a/arch/arc/include/asm/dma.h +++ b/arch/arc/include/asm/dma.h @@ -7,10 +7,5 @@  #define ASM_ARC_DMA_H  #define MAX_DMA_ADDRESS 0xC0000000 -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	0 -#endif  #endif diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 45180a2cc47c..05f29a72150b 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -143,10 +143,4 @@ extern int  get_dma_residue(unsigned int chan);  #endif /* CONFIG_ISA_DMA_API */ -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy    (0) -#endif -  #endif /* __ASM_ARM_DMA_H */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 68e6f25784a4..5916b88d4c94 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -22,11 +22,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)  #define HAVE_PCI_MMAP  #define ARCH_GENERIC_PCI_MMAP_RESOURCE -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 
15 : 14; -} -  extern void pcibios_report_status(unsigned int status_mask, int warn);  #endif /* __KERNEL__ */ diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index b33ca260e3c9..016eb6b46dc0 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -9,7 +9,6 @@  #include <asm/io.h>  #define PCIBIOS_MIN_IO		0x1000 -#define PCIBIOS_MIN_MEM		0  /*   * Set to 1 if the kernel should re-assign all PCI bus numbers @@ -18,21 +17,8 @@  	(pci_has_flag(PCI_REASSIGN_ALL_BUS))  #define arch_can_pci_mmap_wc() 1 -#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1 -extern int isa_dma_bridge_buggy; - -#ifdef CONFIG_PCI -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	/* no legacy IRQ on arm64 */ -	return -ENODEV; -} - -static inline int pci_proc_domain(struct pci_bus *bus) -{ -	return 1; -} -#endif  /* CONFIG_PCI */ +/* Generic PCI */ +#include <asm-generic/pci.h>  #endif  /* __ASM_PCI_H */ diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h index ebc765b1f78b..42724c630d30 100644 --- a/arch/csky/include/asm/pci.h +++ b/arch/csky/include/asm/pci.h @@ -9,26 +9,7 @@  #include <asm/io.h> -#define PCIBIOS_MIN_IO		0 -#define PCIBIOS_MIN_MEM		0 - -/* C-SKY shim does not initialize PCI bus */ -#define pcibios_assign_all_busses() 1 - -extern int isa_dma_bridge_buggy; - -#ifdef CONFIG_PCI -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	/* no legacy IRQ on csky */ -	return -ENODEV; -} - -static inline int pci_proc_domain(struct pci_bus *bus) -{ -	/* always show the domain in /proc */ -	return 1; -} -#endif  /* CONFIG_PCI */ +/* Generic PCI */ +#include <asm-generic/pci.h>  #endif  /* __ASM_CSKY_PCI_H */ diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h index 59625e9c1f9c..eaed2626ffda 100644 --- a/arch/ia64/include/asm/dma.h +++ b/arch/ia64/include/asm/dma.h @@ -12,8 +12,6 @@  extern unsigned long MAX_DMA_ADDRESS; -extern int isa_dma_bridge_buggy; -  #define free_dma(x)  #endif /* _ASM_IA64_DMA_H */ diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 8c163d1d0189..fa8f545c24c9 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -63,10 +63,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)  	return (pci_domain_nr(bus) != 0);  } -#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 
isa_irq_to_vector(15) : isa_irq_to_vector(14); -} -  #endif /* _ASM_IA64_PCI_H */ diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h index f6c5e0dfb4e5..1c8d9c5bc2fa 100644 --- a/arch/m68k/include/asm/dma.h +++ b/arch/m68k/include/asm/dma.h @@ -6,10 +6,4 @@     bootmem allocator (but this should do it for this) */  #define MAX_DMA_ADDRESS PAGE_OFFSET -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy    (0) -#endif -  #endif /* _M68K_DMA_H */ diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h index 5a4bc223743b..ccdfa0dc8413 100644 --- a/arch/m68k/include/asm/pci.h +++ b/arch/m68k/include/asm/pci.h @@ -2,8 +2,6 @@  #ifndef _ASM_M68K_PCI_H  #define _ASM_M68K_PCI_H -#include <asm-generic/pci.h> -  #define	pcibios_assign_all_busses()	1  #define	PCIBIOS_MIN_IO		0x00000100 diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h index f801582be912..7484c9eb66c4 100644 --- a/arch/microblaze/include/asm/dma.h +++ b/arch/microblaze/include/asm/dma.h @@ -9,10 +9,4 @@  /* Virtual address corresponding to last available physical memory address.  */  #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy     (0) -#endif -  #endif /* _ASM_MICROBLAZE_DMA_H */ diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h index be726b943530..d6186e6bea7e 100644 --- a/arch/mips/include/asm/dma.h +++ b/arch/mips/include/asm/dma.h @@ -307,12 +307,4 @@ static __inline__ int get_dma_residue(unsigned int dmanr)  extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */  extern void free_dma(unsigned int dmanr);	/* release it again */ -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	(0) -#endif -  #endif /* _ASM_DMA_H */ diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 9ffc8192adae..3fd6e22c108b 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -139,10 +139,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)  /* Do platform specific device initialization at pci_enable_device() time */  extern int pcibios_plat_dev_init(struct pci_dev *dev); -/* Chances are this interrupt is wired PC-style ...  */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 15 : 14; -} -  #endif /* _ASM_PCI_H */ diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h index eea80ed34e6d..9e8c101de902 100644 --- a/arch/parisc/include/asm/dma.h +++ b/arch/parisc/include/asm/dma.h @@ -176,10 +176,4 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)  #define free_dma(dmanr) -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy 	(0) -#endif -  #endif /* _ASM_DMA_H */ diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index f14465b84de4..127ed5021ae3 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -162,11 +162,6 @@ extern void pcibios_init_bridge(struct pci_dev *);  #define PCIBIOS_MIN_IO          0x10  #define PCIBIOS_MIN_MEM         0x1000 /* NBPG - but pci/setup-res.c dies */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 
15 : 14; -} -  #define HAVE_PCI_MMAP  #define ARCH_GENERIC_PCI_MMAP_RESOURCE diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h index 6161a9596196..d97c66d9ae34 100644 --- a/arch/powerpc/include/asm/dma.h +++ b/arch/powerpc/include/asm/dma.h @@ -340,11 +340,5 @@ extern int request_dma(unsigned int dmanr, const char *device_id);  /* release it again */  extern void free_dma(unsigned int dmanr); -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	(0) -#endif -  #endif /* __KERNEL__ */  #endif	/* _ASM_POWERPC_DMA_H */ diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 915d6ee4b40a..f9da506751bb 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -39,7 +39,6 @@  #define pcibios_assign_all_busses() \  	(pci_has_flag(PCI_REASSIGN_ALL_BUS)) -#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ  static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)  {  	if (ppc_md.pci_get_legacy_ide_irq) diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 7fd52a30e605..6ef4a1426194 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -12,31 +12,7 @@  #include <asm/io.h> -#define PCIBIOS_MIN_IO		0 -#define PCIBIOS_MIN_MEM		0 - -/* RISC-V shim does not initialize PCI bus */ -#define pcibios_assign_all_busses() 1 - -#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 - -extern int isa_dma_bridge_buggy; - -#ifdef CONFIG_PCI -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	/* no legacy IRQ on risc-v */ -	return -ENODEV; -} - -static inline int pci_proc_domain(struct pci_bus *bus) -{ -	/* always show the domain in /proc */ -	return 1; -} - -#ifdef	CONFIG_NUMA - +#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)  static inline int pcibus_to_node(struct pci_bus *bus)  {  	return dev_to_node(&bus->dev); @@ -46,8 +22,9 @@ static inline int pcibus_to_node(struct pci_bus *bus)  				 cpu_all_mask :				\  				 cpumask_of_node(pcibus_to_node(bus)))  #endif -#endif	/* CONFIG_NUMA */ +#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */ -#endif  /* CONFIG_PCI */ +/* Generic PCI */ +#include <asm-generic/pci.h>  #endif  /* _ASM_RISCV_PCI_H */ diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h index 6f26f35d4a71..dec1c4ce628c 100644 --- a/arch/s390/include/asm/dma.h +++ b/arch/s390/include/asm/dma.h @@ -11,10 +11,4 @@   */  #define MAX_DMA_ADDRESS         0x80000000 -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	(0) -#endif -  #endif /* _ASM_S390_DMA_H */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 85eb0ef9d4c3..7b4cdadbc023 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -6,7 +6,6 @@  #include <linux/mutex.h>  #include <linux/iommu.h>  #include <linux/pci_hotplug.h> -#include <asm-generic/pci.h>  #include <asm/pci_clp.h>  #include <asm/pci_debug.h>  #include <asm/pci_insn.h> diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 5d77acbd1c87..6a8da1b742ae 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -145,9 +145,6 @@ int zpci_bus_scan_bus(struct zpci_bus *zbus)  	struct zpci_dev *zdev;  	int devfn, rc, ret = 0; -	if (!zbus->function[0]) -		return 0; -  	for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {  		zdev = zbus->function[devfn];  		if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) { @@ -184,26 +181,26 @@ void zpci_bus_scan_busses(void)  /* 
zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus   * @zbus: the zbus holding the zdevices - * @f0: function 0 of the bus + * @fr: PCI root function that will determine the bus's domain and bus speed   * @ops: the pci operations   * - * Function zero is taken as a parameter as this is used to determine the - * domain, multifunction property and maximum bus speed of the entire bus. + * The PCI function @fr determines the domain (its UID), multifunction property + * and maximum bus speed of the entire bus.   *   * Return: 0 on success, an error code otherwise   */ -static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *f0, struct pci_ops *ops) +static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, struct pci_ops *ops)  {  	struct pci_bus *bus;  	int domain; -	domain = zpci_alloc_domain((u16)f0->uid); +	domain = zpci_alloc_domain((u16)fr->uid);  	if (domain < 0)  		return domain;  	zbus->domain_nr = domain; -	zbus->multifunction = f0->rid_available; -	zbus->max_bus_speed = f0->max_bus_speed; +	zbus->multifunction = fr->rid_available; +	zbus->max_bus_speed = fr->max_bus_speed;  	/*  	 * Note that the zbus->resources are taken over and zbus->resources @@ -303,47 +300,6 @@ void pcibios_bus_add_device(struct pci_dev *pdev)  	}  } -/* zpci_bus_create_hotplug_slots - Add hotplug slot(s) for device added to bus - * @zdev: the zPCI device that was newly added - * - * Add the hotplug slot(s) for the newly added PCI function. Normally this is - * simply the slot for the function itself. If however we are adding the - * function 0 on a zbus, it might be that we already registered functions on - * that zbus but could not create their hotplug slots yet so add those now too. - * - * Return: 0 on success, an error code otherwise - */ -static int zpci_bus_create_hotplug_slots(struct zpci_dev *zdev) -{ -	struct zpci_bus *zbus = zdev->zbus; -	int devfn, rc = 0; - -	rc = zpci_init_slot(zdev); -	if (rc) -		return rc; -	zdev->has_hp_slot = 1; - -	if (zdev->devfn == 0 && zbus->multifunction) { -		/* Now that function 0 is there we can finally create the -		 * hotplug slots for those functions with devfn != 0 that have -		 * been parked in zbus->function[] waiting for us to be able to -		 * create the PCI bus. 
-		 */ -		for  (devfn = 1; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) { -			zdev = zbus->function[devfn]; -			if (zdev && !zdev->has_hp_slot) { -				rc = zpci_init_slot(zdev); -				if (rc) -					return rc; -				zdev->has_hp_slot = 1; -			} -		} - -	} - -	return rc; -} -  static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)  {  	int rc = -EINVAL; @@ -352,21 +308,19 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)  		pr_err("devfn %04x is already assigned\n", zdev->devfn);  		return rc;  	} +  	zdev->zbus = zbus;  	zbus->function[zdev->devfn] = zdev;  	zpci_nb_devices++; -	if (zbus->bus) { -		if (zbus->multifunction && !zdev->rid_available) { -			WARN_ONCE(1, "rid_available not set for multifunction\n"); -			goto error; -		} - -		zpci_bus_create_hotplug_slots(zdev); -	} else { -		/* Hotplug slot will be created once function 0 appears */ -		zbus->multifunction = 1; +	if (zbus->multifunction && !zdev->rid_available) { +		WARN_ONCE(1, "rid_available not set for multifunction\n"); +		goto error;  	} +	rc = zpci_init_slot(zdev); +	if (rc) +		goto error; +	zdev->has_hp_slot = 1;  	return 0; @@ -400,7 +354,11 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)  			return -ENOMEM;  	} -	if (zdev->devfn == 0) { +	if (!zbus->bus) { +		/* The UID of the first PCI function registered with a zpci_bus +		 * is used as the domain number for that bus. Currently there +		 * is exactly one zpci_bus per domain. +		 */  		rc = zpci_bus_create_pci_bus(zbus, zdev, ops);  		if (rc)  			goto error; diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h index 17d23ae98c77..c8bee3f985a2 100644 --- a/arch/sh/include/asm/dma.h +++ b/arch/sh/include/asm/dma.h @@ -137,10 +137,4 @@ extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);  extern int dma_create_sysfs_files(struct dma_channel *, struct dma_info *);  extern void dma_remove_sysfs_files(struct dma_channel *, struct dma_info *); -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	(0) -#endif -  #endif /* __ASM_SH_DMA_H */ diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index ad22e88c6657..54c30126ea17 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -88,10 +88,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)  	return hose->need_domain_info;  } -/* Chances are this interrupt is wired PC-style ...  */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return channel ? 
15 : 14; -} -  #endif /* __ASM_SH_PCI_H */ diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h index 462e7c794a09..08043f35b110 100644 --- a/arch/sparc/include/asm/dma.h +++ b/arch/sparc/include/asm/dma.h @@ -82,14 +82,6 @@  #define DMA_BURST64      0x40  #define DMA_BURSTBITS    0x7f -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy 	(0) -#endif -  #ifdef CONFIG_SPARC32  struct device; diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 4deddf430e5d..1419aa84df71 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -37,16 +37,8 @@ static inline int pci_proc_domain(struct pci_bus *bus)  #define HAVE_PCI_MMAP  #define arch_can_pci_mmap_io()	1  #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA +#define ARCH_GENERIC_PCI_MMAP_RESOURCE  #define get_pci_unmapped_area get_fb_unmapped_area  #endif /* CONFIG_SPARC64 */ -#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI) -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	return PCI_IRQ_NONE; -} -#else -#include <asm-generic/pci.h> -#endif -  #endif /* ___ASM_SPARC_PCI_H */ diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 31b0c1983286..cb1ef25116e9 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -751,156 +751,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)  }  /* Platform support for /proc/bus/pci/X/Y mmap()s. */ - -/* If the user uses a host-bridge as the PCI device, he may use - * this to perform a raw mmap() of the I/O or MEM space behind - * that controller. - * - * This can be useful for execution of x86 PCI bios initialization code - * on a PCI card, like the xfree86 int10 stuff does. - */ -static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma, -				      enum pci_mmap_state mmap_state) +int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)  {  	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; -	unsigned long space_size, user_offset, user_size; - -	if (mmap_state == pci_mmap_io) { -		space_size = resource_size(&pbm->io_space); -	} else { -		space_size = resource_size(&pbm->mem_space); -	} - -	/* Make sure the request is in range. */ -	user_offset = vma->vm_pgoff << PAGE_SHIFT; -	user_size = vma->vm_end - vma->vm_start; - -	if (user_offset >= space_size || -	    (user_offset + user_size) > space_size) -		return -EINVAL; - -	if (mmap_state == pci_mmap_io) { -		vma->vm_pgoff = (pbm->io_space.start + -				 user_offset) >> PAGE_SHIFT; -	} else { -		vma->vm_pgoff = (pbm->mem_space.start + -				 user_offset) >> PAGE_SHIFT; -	} - -	return 0; -} - -/* Adjust vm_pgoff of VMA such that it is the physical page offset - * corresponding to the 32-bit pci bus offset for DEV requested by the user. - * - * Basically, the user finds the base address for his device which he wishes - * to mmap.  They read the 32-bit value from the config space base register, - * add whatever PAGE_SIZE multiple offset they wish, and feed this into the - * offset parameter of mmap on /proc/bus/pci/XXX for that device. - * - * Returns negative error code on failure, zero on success. 
- */ -static int __pci_mmap_make_offset(struct pci_dev *pdev, -				  struct vm_area_struct *vma, -				  enum pci_mmap_state mmap_state) -{ -	unsigned long user_paddr, user_size; -	int i, err; - -	/* First compute the physical address in vma->vm_pgoff, -	 * making sure the user offset is within range in the -	 * appropriate PCI space. -	 */ -	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state); -	if (err) -		return err; - -	/* If this is a mapping on a host bridge, any address -	 * is OK. -	 */ -	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) -		return err; - -	/* Otherwise make sure it's in the range for one of the -	 * device's resources. -	 */ -	user_paddr = vma->vm_pgoff << PAGE_SHIFT; -	user_size = vma->vm_end - vma->vm_start; +	resource_size_t ioaddr = pci_resource_start(pdev, bar); -	for (i = 0; i <= PCI_ROM_RESOURCE; i++) { -		struct resource *rp = &pdev->resource[i]; -		resource_size_t aligned_end; - -		/* Active? */ -		if (!rp->flags) -			continue; - -		/* Same type? */ -		if (i == PCI_ROM_RESOURCE) { -			if (mmap_state != pci_mmap_mem) -				continue; -		} else { -			if ((mmap_state == pci_mmap_io && -			     (rp->flags & IORESOURCE_IO) == 0) || -			    (mmap_state == pci_mmap_mem && -			     (rp->flags & IORESOURCE_MEM) == 0)) -				continue; -		} - -		/* Align the resource end to the next page address. -		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1), -		 * because actually we need the address of the next byte -		 * after rp->end. -		 */ -		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK; - -		if ((rp->start <= user_paddr) && -		    (user_paddr + user_size) <= aligned_end) -			break; -	} - -	if (i > PCI_ROM_RESOURCE) +	if (!pbm)  		return -EINVAL; -	return 0; -} - -/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci - * device mapping. - */ -static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, -					     enum pci_mmap_state mmap_state) -{ -	/* Our io_remap_pfn_range takes care of this, do nothing.  */ -} - -/* Perform the actual remap of the pages for a PCI device mapping, as appropriate - * for this architecture.  The region in the process to map is described by vm_start - * and vm_end members of VMA, the base physical address is found in vm_pgoff. - * The pci device structure is provided so that architectures may make mapping - * decisions on a per-device or per-bus basis. - * - * Returns a negative error code on failure, zero on success. 
- */ -int pci_mmap_page_range(struct pci_dev *dev, int bar, -			struct vm_area_struct *vma, -			enum pci_mmap_state mmap_state, int write_combine) -{ -	int ret; - -	ret = __pci_mmap_make_offset(dev, vma, mmap_state); -	if (ret < 0) -		return ret; - -	__pci_mmap_set_pgprot(dev, vma, mmap_state); - -	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -	ret = io_remap_pfn_range(vma, vma->vm_start, -				 vma->vm_pgoff, -				 vma->vm_end - vma->vm_start, -				 vma->vm_page_prot); -	if (ret) -		return ret; +	vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;  	return 0;  } diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h index da13fd5519ef..34fe4921b5fa 100644 --- a/arch/um/include/asm/pci.h +++ b/arch/um/include/asm/pci.h @@ -4,28 +4,8 @@  #include <linux/types.h>  #include <asm/io.h> -#define PCIBIOS_MIN_IO		0 -#define PCIBIOS_MIN_MEM		0 - -#define pcibios_assign_all_busses() 1 - -extern int isa_dma_bridge_buggy; - -#ifdef CONFIG_PCI -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ -	/* no legacy IRQs */ -	return -ENODEV; -} -#endif - -#ifdef CONFIG_PCI_DOMAINS -static inline int pci_proc_domain(struct pci_bus *bus) -{ -	/* always show the domain in /proc */ -	return 1; -} -#endif  /* CONFIG_PCI */ +/* Generic PCI */ +#include <asm-generic/pci.h>  #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN  /* diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h index 8e95aa4b0d17..8ae6e0e11b8b 100644 --- a/arch/x86/include/asm/dma.h +++ b/arch/x86/include/asm/dma.h @@ -307,12 +307,4 @@ extern int request_dma(unsigned int dmanr, const char *device_id);  extern void free_dma(unsigned int dmanr);  #endif -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy	(0) -#endif -  #endif /* _ASM_X86_DMA_H */ diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index f3fd5928bcbb..736793d65bcb 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -105,9 +105,6 @@ static inline void early_quirks(void) { }  extern void pci_iommu_alloc(void); -/* generic pci stuff */ -#include <asm-generic/pci.h> -  #ifdef CONFIG_NUMA  /* Returns the node based on pci bus */  static inline int __pcibus_to_node(const struct pci_bus *bus) diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 7227c15299d0..9651275aecd1 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -1,6 +1,7 @@  // SPDX-License-Identifier: GPL-2.0  #include <linux/bitops.h>  #include <linux/delay.h> +#include <linux/isa-dma.h>  #include <linux/pci.h>  #include <asm/dma.h>  #include <linux/io.h> diff --git a/arch/xtensa/include/asm/dma.h b/arch/xtensa/include/asm/dma.h index bb099a373b5a..172644539032 100644 --- a/arch/xtensa/include/asm/dma.h +++ b/arch/xtensa/include/asm/dma.h @@ -52,11 +52,4 @@  extern int request_dma(unsigned int dmanr, const char * device_id);  extern void free_dma(unsigned int dmanr); -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy 	(0) -#endif - -  #endif diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 8e2b48a268db..b56de9635b6c 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -43,7 +43,4 @@  #define ARCH_GENERIC_PCI_MMAP_RESOURCE	1  #define arch_can_pci_mmap_io()		1 -/* Generic PCI */ -#include <asm-generic/pci.h> -  #endif	/* _XTENSA_PCI_H */ diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 
53cab975f612..860014b89b8e 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -41,6 +41,8 @@ struct mcfg_fixup {  static struct mcfg_fixup mcfg_quirks[] = {  /*	{ OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ +#ifdef CONFIG_ARM64 +  #define AL_ECAM(table_id, rev, seg, ops) \  	{ "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } @@ -169,6 +171,17 @@ static struct mcfg_fixup mcfg_quirks[] = {  	ALTRA_ECAM_QUIRK(1, 13),  	ALTRA_ECAM_QUIRK(1, 14),  	ALTRA_ECAM_QUIRK(1, 15), +#endif /* ARM64 */ + +#ifdef CONFIG_LOONGARCH +#define LOONGSON_ECAM_MCFG(table_id, seg) \ +	{ "LOONGS", table_id, 1, seg, MCFG_BUS_ANY, &loongson_pci_ecam_ops } + +	LOONGSON_ECAM_MCFG("\0", 0), +	LOONGSON_ECAM_MCFG("LOONGSON", 0), +	LOONGSON_ECAM_MCFG("\0", 1), +	LOONGSON_ECAM_MCFG("LOONGSON", 1), +#endif /* LOONGARCH */  };  static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c index 700982464c53..020b3d1e1ac0 100644 --- a/drivers/comedi/drivers/comedi_isadma.c +++ b/drivers/comedi/drivers/comedi_isadma.c @@ -8,7 +8,7 @@  #include <linux/slab.h>  #include <linux/delay.h>  #include <linux/dma-mapping.h> -#include <asm/dma.h> +#include <linux/isa-dma.h>  #include <linux/comedi/comedidev.h>  #include <linux/comedi/comedi_isadma.h> diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index 468d1097a1ec..07f756479663 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -64,8 +64,8 @@ static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)  static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)  { +	struct dw_edma_chip *chip = desc->chan->dw->chip;  	struct dw_edma_chan *chan = desc->chan; -	struct dw_edma *dw = chan->chip->dw;  	struct dw_edma_chunk *chunk;  	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); @@ -82,11 +82,11 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)  	 */  	chunk->cb = !(desc->chunks_alloc % 2);  	if (chan->dir == EDMA_DIR_WRITE) { -		chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr; -		chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr; +		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr; +		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;  	} else { -		chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr; -		chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr; +		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr; +		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;  	}  	if (desc->chunk) { @@ -339,21 +339,40 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)  	if (!chan->configured)  		return NULL; -	switch (chan->config.direction) { -	case DMA_DEV_TO_MEM: /* local DMA */ -		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ) -			break; -		return NULL; -	case DMA_MEM_TO_DEV: /* local DMA */ -		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) -			break; -		return NULL; -	default: /* remote DMA */ -		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ) -			break; -		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE) -			break; -		return NULL; +	/* +	 * Local Root Port/End-point              Remote End-point +	 * +-----------------------+ PCIe bus +----------------------+ +	 * |                       |    +-+   |                      | +	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    | +	 * |                       |    | |   |         
             | +	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    | +	 * |                       |    +-+   |                      | +	 * +-----------------------+          +----------------------+ +	 * +	 * 1. Normal logic: +	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the +	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used +	 * for the device read operations (DEV_TO_MEM) and the Tx channel +	 * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV). +	 * +	 * 2. Inverted logic: +	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the +	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx +	 * channel (EDMA_DIR_WRITE) will be used for the device read operations +	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write +	 * operations (MEM_TO_DEV). +	 * +	 * It is the client driver responsibility to choose a proper channel +	 * for the DMA transfers. +	 */ +	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) { +		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) || +		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV)) +			return NULL; +	} else { +		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) || +		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV)) +			return NULL;  	}  	if (xfer->type == EDMA_XFER_CYCLIC) { @@ -423,7 +442,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)  		chunk->ll_region.sz += burst->sz;  		desc->alloc_sz += burst->sz; -		if (chan->dir == EDMA_DIR_WRITE) { +		if (dir == DMA_DEV_TO_MEM) {  			burst->sar = src_addr;  			if (xfer->type == EDMA_XFER_CYCLIC) {  				burst->dar = xfer->xfer.cyclic.paddr; @@ -663,7 +682,7 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)  	if (chan->status != EDMA_ST_IDLE)  		return -EBUSY; -	pm_runtime_get(chan->chip->dev); +	pm_runtime_get(chan->dw->chip->dev);  	return 0;  } @@ -685,15 +704,15 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)  		cpu_relax();  	} -	pm_runtime_put(chan->chip->dev); +	pm_runtime_put(chan->dw->chip->dev);  } -static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, +static int dw_edma_channel_setup(struct dw_edma *dw, bool write,  				 u32 wr_alloc, u32 rd_alloc)  { +	struct dw_edma_chip *chip = dw->chip;  	struct dw_edma_region *dt_region;  	struct device *dev = chip->dev; -	struct dw_edma *dw = chip->dw;  	struct dw_edma_chan *chan;  	struct dw_edma_irq *irq;  	struct dma_device *dma; @@ -726,7 +745,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,  		chan->vc.chan.private = dt_region; -		chan->chip = chip; +		chan->dw = dw;  		chan->id = j;  		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;  		chan->configured = false; @@ -734,9 +753,9 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,  		chan->status = EDMA_ST_IDLE;  		if (write) -			chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ); +			chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);  		else -			chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ); +			chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);  		chan->ll_max -= 1;  		dev_vdbg(dev, "L. 
List:\tChannel %s[%u] max_cnt=%u\n", @@ -766,13 +785,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,  		vchan_init(&chan->vc, dma);  		if (write) { -			dt_region->paddr = dw->dt_region_wr[j].paddr; -			dt_region->vaddr = dw->dt_region_wr[j].vaddr; -			dt_region->sz = dw->dt_region_wr[j].sz; +			dt_region->paddr = chip->dt_region_wr[j].paddr; +			dt_region->vaddr = chip->dt_region_wr[j].vaddr; +			dt_region->sz = chip->dt_region_wr[j].sz;  		} else { -			dt_region->paddr = dw->dt_region_rd[j].paddr; -			dt_region->vaddr = dw->dt_region_rd[j].vaddr; -			dt_region->sz = dw->dt_region_rd[j].sz; +			dt_region->paddr = chip->dt_region_rd[j].paddr; +			dt_region->vaddr = chip->dt_region_rd[j].vaddr; +			dt_region->sz = chip->dt_region_rd[j].sz;  		}  		dw_edma_v0_core_device_config(chan); @@ -826,11 +845,11 @@ static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)  		(*mask)++;  } -static int dw_edma_irq_request(struct dw_edma_chip *chip, +static int dw_edma_irq_request(struct dw_edma *dw,  			       u32 *wr_alloc, u32 *rd_alloc)  { -	struct device *dev = chip->dev; -	struct dw_edma *dw = chip->dw; +	struct dw_edma_chip *chip = dw->chip; +	struct device *dev = dw->chip->dev;  	u32 wr_mask = 1;  	u32 rd_mask = 1;  	int i, err = 0; @@ -839,12 +858,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,  	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; -	if (dw->nr_irqs < 1) +	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)  		return -EINVAL; -	if (dw->nr_irqs == 1) { +	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL); +	if (!dw->irq) +		return -ENOMEM; + +	if (chip->nr_irqs == 1) {  		/* Common IRQ shared among all channels */ -		irq = dw->ops->irq_vector(dev, 0); +		irq = chip->ops->irq_vector(dev, 0);  		err = request_irq(irq, dw_edma_interrupt_common,  				  IRQF_SHARED, dw->name, &dw->irq[0]);  		if (err) { @@ -854,9 +877,11 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,  		if (irq_get_msi_desc(irq))  			get_cached_msi_msg(irq, &dw->irq[0].msi); + +		dw->nr_irqs = 1;  	} else {  		/* Distribute IRQs equally among all channels */ -		int tmp = dw->nr_irqs; +		int tmp = chip->nr_irqs;  		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {  			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); @@ -867,7 +892,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,  		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);  		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { -			irq = dw->ops->irq_vector(dev, i); +			irq = chip->ops->irq_vector(dev, i);  			err = request_irq(irq,  					  i < *wr_alloc ?  						
dw_edma_interrupt_write : @@ -901,20 +926,22 @@ int dw_edma_probe(struct dw_edma_chip *chip)  		return -EINVAL;  	dev = chip->dev; -	if (!dev) +	if (!dev || !chip->ops)  		return -EINVAL; -	dw = chip->dw; -	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector) -		return -EINVAL; +	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); +	if (!dw) +		return -ENOMEM; + +	dw->chip = chip;  	raw_spin_lock_init(&dw->lock); -	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, +	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,  			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));  	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH); -	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, +	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,  			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));  	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH); @@ -936,17 +963,17 @@ int dw_edma_probe(struct dw_edma_chip *chip)  	dw_edma_v0_core_off(dw);  	/* Request IRQs */ -	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc); +	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);  	if (err)  		return err;  	/* Setup write channels */ -	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc); +	err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);  	if (err)  		goto err_irq_free;  	/* Setup read channels */ -	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc); +	err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);  	if (err)  		goto err_irq_free; @@ -954,15 +981,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)  	pm_runtime_enable(dev);  	/* Turn debugfs on */ -	dw_edma_v0_core_debugfs_on(chip); +	dw_edma_v0_core_debugfs_on(dw); + +	chip->dw = dw;  	return 0;  err_irq_free:  	for (i = (dw->nr_irqs - 1); i >= 0; i--) -		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); - -	dw->nr_irqs = 0; +		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);  	return err;  } @@ -980,7 +1007,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)  	/* Free irqs */  	for (i = (dw->nr_irqs - 1); i >= 0; i--) -		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]); +		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);  	/* Power management */  	pm_runtime_disable(dev); @@ -1001,7 +1028,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)  	}  	/* Turn debugfs off */ -	dw_edma_v0_core_debugfs_off(chip); +	dw_edma_v0_core_debugfs_off(dw);  	return 0;  } diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h index 60316d408c3e..85df2d511907 100644 --- a/drivers/dma/dw-edma/dw-edma-core.h +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -15,20 +15,12 @@  #include "../virt-dma.h"  #define EDMA_LL_SZ					24 -#define EDMA_MAX_WR_CH					8 -#define EDMA_MAX_RD_CH					8  enum dw_edma_dir {  	EDMA_DIR_WRITE = 0,  	EDMA_DIR_READ  }; -enum dw_edma_map_format { -	EDMA_MF_EDMA_LEGACY = 0x0, -	EDMA_MF_EDMA_UNROLL = 0x1, -	EDMA_MF_HDMA_COMPAT = 0x5 -}; -  enum dw_edma_request {  	EDMA_REQ_NONE = 0,  	EDMA_REQ_STOP, @@ -57,12 +49,6 @@ struct dw_edma_burst {  	u32				sz;  }; -struct dw_edma_region { -	phys_addr_t			paddr; -	void				__iomem *vaddr; -	size_t				sz; -}; -  struct dw_edma_chunk {  	struct list_head		list;  	struct dw_edma_chan		*chan; @@ -87,7 +73,7 @@ struct dw_edma_desc {  struct dw_edma_chan {  	struct virt_dma_chan		vc; -	struct dw_edma_chip		*chip; +	struct dw_edma			*dw;  	int				id;  	enum dw_edma_dir		dir; @@ -109,10 +95,6 @@ struct dw_edma_irq {  	struct dw_edma			*dw;  }; -struct dw_edma_core_ops { -	int	(*irq_vector)(struct device *dev, unsigned int nr); -}; -  struct dw_edma {  	char				
name[20]; @@ -122,21 +104,14 @@ struct dw_edma {  	struct dma_device		rd_edma;  	u16				rd_ch_cnt; -	struct dw_edma_region		rg_region;	/* Registers */ -	struct dw_edma_region		ll_region_wr[EDMA_MAX_WR_CH]; -	struct dw_edma_region		ll_region_rd[EDMA_MAX_RD_CH]; -	struct dw_edma_region		dt_region_wr[EDMA_MAX_WR_CH]; -	struct dw_edma_region		dt_region_rd[EDMA_MAX_RD_CH]; -  	struct dw_edma_irq		*irq;  	int				nr_irqs; -	enum dw_edma_map_format		mf; -  	struct dw_edma_chan		*chan; -	const struct dw_edma_core_ops	*ops;  	raw_spinlock_t			lock;		/* Only for legacy */ + +	struct dw_edma_chip             *chip;  #ifdef CONFIG_DEBUG_FS  	struct dentry			*debugfs;  #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c index cee7aa231d7b..d6b5e2463884 100644 --- a/drivers/dma/dw-edma/dw-edma-pcie.c +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -148,7 +148,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  	struct dw_edma_pcie_data vsec_data;  	struct device *dev = &pdev->dev;  	struct dw_edma_chip *chip; -	struct dw_edma *dw;  	int err, nr_irqs;  	int i, mask; @@ -197,10 +196,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  	if (!chip)  		return -ENOMEM; -	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); -	if (!dw) -		return -ENOMEM; -  	/* IRQs allocation */  	nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,  					PCI_IRQ_MSI | PCI_IRQ_MSIX); @@ -211,29 +206,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  	}  	/* Data structure initialization */ -	chip->dw = dw;  	chip->dev = dev;  	chip->id = pdev->devfn; -	chip->irq = pdev->irq; -	dw->mf = vsec_data.mf; -	dw->nr_irqs = nr_irqs; -	dw->ops = &dw_edma_pcie_core_ops; -	dw->wr_ch_cnt = vsec_data.wr_ch_cnt; -	dw->rd_ch_cnt = vsec_data.rd_ch_cnt; +	chip->mf = vsec_data.mf; +	chip->nr_irqs = nr_irqs; +	chip->ops = &dw_edma_pcie_core_ops; -	dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar]; -	if (!dw->rg_region.vaddr) -		return -ENOMEM; +	chip->ll_wr_cnt = vsec_data.wr_ch_cnt; +	chip->ll_rd_cnt = vsec_data.rd_ch_cnt; -	dw->rg_region.vaddr += vsec_data.rg.off; -	dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start; -	dw->rg_region.paddr += vsec_data.rg.off; -	dw->rg_region.sz = vsec_data.rg.sz; +	chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar]; +	if (!chip->reg_base) +		return -ENOMEM; -	for (i = 0; i < dw->wr_ch_cnt; i++) { -		struct dw_edma_region *ll_region = &dw->ll_region_wr[i]; -		struct dw_edma_region *dt_region = &dw->dt_region_wr[i]; +	for (i = 0; i < chip->ll_wr_cnt; i++) { +		struct dw_edma_region *ll_region = &chip->ll_region_wr[i]; +		struct dw_edma_region *dt_region = &chip->dt_region_wr[i];  		struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];  		struct dw_edma_block *dt_block = &vsec_data.dt_wr[i]; @@ -256,9 +245,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  		dt_region->sz = dt_block->sz;  	} -	for (i = 0; i < dw->rd_ch_cnt; i++) { -		struct dw_edma_region *ll_region = &dw->ll_region_rd[i]; -		struct dw_edma_region *dt_region = &dw->dt_region_rd[i]; +	for (i = 0; i < chip->ll_rd_cnt; i++) { +		struct dw_edma_region *ll_region = &chip->ll_region_rd[i]; +		struct dw_edma_region *dt_region = &chip->dt_region_rd[i];  		struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];  		struct dw_edma_block *dt_block = &vsec_data.dt_rd[i]; @@ -282,45 +271,45 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  	}  	/* Debug info */ -	if (dw->mf == EDMA_MF_EDMA_LEGACY) -		pci_dbg(pdev, "Version:\teDMA Port Logic 
(0x%x)\n", dw->mf); -	else if (dw->mf == EDMA_MF_EDMA_UNROLL) -		pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf); -	else if (dw->mf == EDMA_MF_HDMA_COMPAT) -		pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf); +	if (chip->mf == EDMA_MF_EDMA_LEGACY) +		pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf); +	else if (chip->mf == EDMA_MF_EDMA_UNROLL) +		pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf); +	else if (chip->mf == EDMA_MF_HDMA_COMPAT) +		pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);  	else -		pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf); +		pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); -	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n", +	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",  		vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz, -		dw->rg_region.vaddr, &dw->rg_region.paddr); +		chip->reg_base); -	for (i = 0; i < dw->wr_ch_cnt; i++) { +	for (i = 0; i < chip->ll_wr_cnt; i++) {  		pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",  			i, vsec_data.ll_wr[i].bar, -			vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz, -			dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr); +			vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz, +			chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);  		pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",  			i, vsec_data.dt_wr[i].bar, -			vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz, -			dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr); +			vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz, +			chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);  	} -	for (i = 0; i < dw->rd_ch_cnt; i++) { +	for (i = 0; i < chip->ll_rd_cnt; i++) {  		pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",  			i, vsec_data.ll_rd[i].bar, -			vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz, -			dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr); +			vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz, +			chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);  		pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",  			i, vsec_data.dt_rd[i].bar, -			vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz, -			dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr); +			vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz, +			chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);  	} -	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); +	pci_dbg(pdev, "Nr. 
IRQs:\t%u\n", chip->nr_irqs);  	/* Validating if PCI interrupts were enabled */  	if (!pci_dev_msi_enabled(pdev)) { @@ -328,10 +317,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,  		return -EPERM;  	} -	dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); -	if (!dw->irq) -		return -ENOMEM; -  	/* Starting eDMA driver */  	err = dw_edma_probe(chip);  	if (err) { diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c index c73b9ed1ce74..77e6cfe52e0a 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.c +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -25,7 +25,7 @@ enum dw_edma_control {  static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)  { -	return dw->rg_region.vaddr; +	return dw->chip->reg_base;  }  #define SET_32(dw, name, value)				\ @@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)  static inline struct dw_edma_v0_ch_regs __iomem *  __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)  { -	if (dw->mf == EDMA_MF_EDMA_LEGACY) +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)  		return &(__dw_regs(dw)->type.legacy.ch);  	if (dir == EDMA_DIR_WRITE) @@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)  static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,  			     u32 value, void __iomem *addr)  { -	if (dw->mf == EDMA_MF_EDMA_LEGACY) { +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {  		u32 viewport_sel;  		unsigned long flags; @@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,  {  	u32 value; -	if (dw->mf == EDMA_MF_EDMA_LEGACY) { +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {  		u32 viewport_sel;  		unsigned long flags; @@ -169,7 +169,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,  static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,  			     u64 value, void __iomem *addr)  { -	if (dw->mf == EDMA_MF_EDMA_LEGACY) { +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {  		u32 viewport_sel;  		unsigned long flags; @@ -194,7 +194,7 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,  {  	u32 value; -	if (dw->mf == EDMA_MF_EDMA_LEGACY) { +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {  		u32 viewport_sel;  		unsigned long flags; @@ -256,7 +256,7 @@ u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)  enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)  { -	struct dw_edma *dw = chan->chip->dw; +	struct dw_edma *dw = chan->dw;  	u32 tmp;  	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, @@ -272,7 +272,7 @@ enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)  void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)  { -	struct dw_edma *dw = chan->chip->dw; +	struct dw_edma *dw = chan->dw;  	SET_RW_32(dw, chan->dir, int_clear,  		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); @@ -280,7 +280,7 @@ void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)  void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)  { -	struct dw_edma *dw = chan->chip->dw; +	struct dw_edma *dw = chan->dw;  	SET_RW_32(dw, chan->dir, int_clear,  		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); @@ -301,6 +301,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)  static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)  {  	struct dw_edma_burst *child; +	struct dw_edma_chan *chan = chunk->chan;  	struct 
dw_edma_v0_lli __iomem *lli;  	struct dw_edma_v0_llp __iomem *llp;  	u32 control = 0, i = 0; @@ -314,9 +315,11 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)  	j = chunk->bursts_alloc;  	list_for_each_entry(child, &chunk->burst->list, list) {  		j--; -		if (!j) -			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); - +		if (!j) { +			control |= DW_EDMA_V0_LIE; +			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL)) +				control |= DW_EDMA_V0_RIE; +		}  		/* Channel control */  		SET_LL_32(&lli[i].control, control);  		/* Transfer size */ @@ -357,7 +360,7 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)  void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)  {  	struct dw_edma_chan *chan = chunk->chan; -	struct dw_edma *dw = chan->chip->dw; +	struct dw_edma *dw = chan->dw;  	u32 tmp;  	dw_edma_v0_core_write_chunk(chunk); @@ -365,7 +368,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)  	if (first) {  		/* Enable engine */  		SET_RW_32(dw, chan->dir, engine_en, BIT(0)); -		if (dw->mf == EDMA_MF_HDMA_COMPAT) { +		if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {  			switch (chan->id) {  			case 0:  				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, @@ -427,7 +430,7 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)  int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)  { -	struct dw_edma *dw = chan->chip->dw; +	struct dw_edma *dw = chan->dw;  	u32 tmp = 0;  	/* MSI done addr - low, high */ @@ -497,12 +500,12 @@ int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)  }  /* eDMA debugfs callbacks */ -void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) +void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)  { -	dw_edma_v0_debugfs_on(chip); +	dw_edma_v0_debugfs_on(dw);  } -void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip) +void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)  { -	dw_edma_v0_debugfs_off(chip); +	dw_edma_v0_debugfs_off(dw);  } diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h index 2afa626b8300..75aec6d31b21 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-core.h +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -22,7 +22,7 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir)  void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);  int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);  /* eDMA debug fs callbacks */ -void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); -void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip); +void dw_edma_v0_core_debugfs_on(struct dw_edma *dw); +void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);  #endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c index 4b3bcffd15ef..5226c9014703 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -54,7 +54,7 @@ struct debugfs_entries {  static int dw_edma_debugfs_u32_get(void *data, u64 *val)  {  	void __iomem *reg = (void __force __iomem *)data; -	if (dw->mf == EDMA_MF_EDMA_LEGACY && +	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&  	    reg >= (void __iomem *)&regs->type.legacy.ch) {  		void __iomem *ptr = &regs->type.legacy.ch;  		u32 viewport_sel = 0; @@ -173,7 +173,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)  	nr_entries = ARRAY_SIZE(debugfs_regs);  	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); -	if (dw->mf == EDMA_MF_HDMA_COMPAT) { +	if 
(dw->chip->mf == EDMA_MF_HDMA_COMPAT) {  		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);  		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,  					   regs_dir); @@ -242,7 +242,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)  	nr_entries = ARRAY_SIZE(debugfs_regs);  	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); -	if (dw->mf == EDMA_MF_HDMA_COMPAT) { +	if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {  		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);  		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,  					   regs_dir); @@ -282,13 +282,13 @@ static void dw_edma_debugfs_regs(void)  	dw_edma_debugfs_regs_rd(regs_dir);  } -void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +void dw_edma_v0_debugfs_on(struct dw_edma *_dw)  { -	dw = chip->dw; +	dw = _dw;  	if (!dw)  		return; -	regs = dw->rg_region.vaddr; +	regs = dw->chip->reg_base;  	if (!regs)  		return; @@ -296,16 +296,16 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)  	if (!dw->debugfs)  		return; -	debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf); +	debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);  	debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);  	debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);  	dw_edma_debugfs_regs();  } -void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip) +void dw_edma_v0_debugfs_off(struct dw_edma *_dw)  { -	dw = chip->dw; +	dw = _dw;  	if (!dw)  		return; diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h index d0ff25a9ea5c..3391b86edf5a 100644 --- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -12,14 +12,14 @@  #include <linux/dma/edma.h>  #ifdef CONFIG_DEBUG_FS -void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip); -void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip); +void dw_edma_v0_debugfs_on(struct dw_edma *dw); +void dw_edma_v0_debugfs_off(struct dw_edma *dw);  #else -static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)  {  } -static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip) +static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)  {  }  #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index b8d96d38064d..d1c5fcf00a8a 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -237,7 +237,7 @@ config PCIE_ROCKCHIP_EP  config PCIE_MEDIATEK  	tristate "MediaTek PCIe controller" -	depends on ARCH_MEDIATEK || COMPILE_TEST +	depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST  	depends on OF  	depends on PCI_MSI_IRQ_DOMAIN  	help @@ -293,7 +293,7 @@ config PCI_HYPERV_INTERFACE  config PCI_LOONGSON  	bool "LOONGSON PCI Controller"  	depends on MACH_LOONGSON64 || COMPILE_TEST -	depends on OF +	depends on OF || ACPI  	depends on PCI_QUIRKS  	default MACH_LOONGSON64  	help diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index 52767f26048f..13c4032ca379 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -243,7 +243,6 @@ err_phy:  	return ret;  } -#ifdef CONFIG_PM_SLEEP  static int cdns_pcie_suspend_noirq(struct device *dev)  {  	struct cdns_pcie *pcie = dev_get_drvdata(dev); @@ -266,9 +265,8 @@ static int cdns_pcie_resume_noirq(struct device *dev)  	return 0;  } -#endif  const struct dev_pm_ops 
cdns_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, -				      cdns_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, +				  cdns_pcie_resume_noirq)  }; diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index dfcdeb432dc8..38462ed11d07 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -178,7 +178,7 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)  	dra7xx_pcie_enable_msi_interrupts(dra7xx);  } -static int dra7xx_pcie_host_init(struct pcie_port *pp) +static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); @@ -202,7 +202,7 @@ static const struct irq_domain_ops intx_domain_ops = {  	.xlate = pci_irqd_intx_xlate,  }; -static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index) +static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned long val; @@ -224,7 +224,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)  	return 1;  } -static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp) +static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	int ret, i, count, num_ctrls; @@ -255,8 +255,8 @@ static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)  {  	struct irq_chip *chip = irq_desc_get_chip(desc);  	struct dra7xx_pcie *dra7xx; +	struct dw_pcie_rp *pp;  	struct dw_pcie *pci; -	struct pcie_port *pp;  	unsigned long reg;  	u32 bit; @@ -344,7 +344,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)  	return IRQ_HANDLED;  } -static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct device *dev = pci->dev; @@ -475,7 +475,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,  {  	int ret;  	struct dw_pcie *pci = dra7xx->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = pci->dev;  	pp->irq = platform_get_irq(pdev, 1); @@ -483,7 +483,7 @@ static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,  		return pp->irq;  	/* MSI IRQ is muxed */ -	pp->msi_irq = -ENODEV; +	pp->msi_irq[0] = -ENODEV;  	ret = dra7xx_pcie_init_irq_domain(pp);  	if (ret < 0) @@ -862,7 +862,6 @@ err_link:  	return ret;  } -#ifdef CONFIG_PM_SLEEP  static int dra7xx_pcie_suspend(struct device *dev)  {  	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); @@ -919,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)  	return 0;  } -#endif  static void dra7xx_pcie_shutdown(struct platform_device *pdev)  { @@ -940,9 +938,9 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)  }  static const struct dev_pm_ops dra7xx_pcie_pm_ops = { -	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, -				      dra7xx_pcie_resume_noirq) +	SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, +				  dra7xx_pcie_resume_noirq)  };  static struct platform_driver dra7xx_pcie_driver = { diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index 467c8d1cd7e4..ec5611005566 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ 
b/drivers/pci/controller/dwc/pci-exynos.c @@ -249,7 +249,7 @@ static int exynos_pcie_link_up(struct dw_pcie *pci)  	return (val & PCIE_ELBI_XMLH_LINKUP);  } -static int exynos_pcie_host_init(struct pcie_port *pp) +static int exynos_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct exynos_pcie *ep = to_exynos_pcie(pci); @@ -258,9 +258,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)  	exynos_pcie_assert_core_reset(ep); -	phy_reset(ep->phy); -	phy_power_on(ep->phy);  	phy_init(ep->phy); +	phy_power_on(ep->phy);  	exynos_pcie_deassert_core_reset(ep);  	exynos_pcie_enable_irq_pulse(ep); @@ -276,7 +275,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,  				       struct platform_device *pdev)  {  	struct dw_pcie *pci = &ep->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = &pdev->dev;  	int ret; @@ -292,7 +291,7 @@ static int exynos_add_pcie_port(struct exynos_pcie *ep,  	}  	pp->ops = &exynos_pcie_host_ops; -	pp->msi_irq = -ENODEV; +	pp->msi_irq[0] = -ENODEV;  	ret = dw_pcie_host_init(pp);  	if (ret) { @@ -390,7 +389,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)  	return 0;  } -static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev) +static int exynos_pcie_suspend_noirq(struct device *dev)  {  	struct exynos_pcie *ep = dev_get_drvdata(dev); @@ -402,11 +401,11 @@ static int __maybe_unused exynos_pcie_suspend_noirq(struct device *dev)  	return 0;  } -static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev) +static int exynos_pcie_resume_noirq(struct device *dev)  {  	struct exynos_pcie *ep = dev_get_drvdata(dev);  	struct dw_pcie *pci = &ep->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	int ret;  	ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies); @@ -421,8 +420,8 @@ static int __maybe_unused exynos_pcie_resume_noirq(struct device *dev)  }  static const struct dev_pm_ops exynos_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq, -				      exynos_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq, +				  exynos_pcie_resume_noirq)  };  static const struct of_device_id exynos_pcie_of_match[] = { diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 7a285fb0f619..6e5debdbc55b 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -67,6 +67,7 @@ struct imx6_pcie {  	struct dw_pcie		*pci;  	int			reset_gpio;  	bool			gpio_active_high; +	bool			link_is_up;  	struct clk		*pcie_bus;  	struct clk		*pcie_phy;  	struct clk		*pcie_inbound_axi; @@ -146,6 +147,31 @@ struct imx6_pcie {  #define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)  #define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3) +static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) +{ +	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ && +		imx6_pcie->drvdata->variant != IMX8MM); +	return imx6_pcie->controller_id == 1 ? 
IOMUXC_GPR16 : IOMUXC_GPR14; +} + +static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) +{ +	unsigned int mask, val; + +	if (imx6_pcie->drvdata->variant == IMX8MQ && +	    imx6_pcie->controller_id == 1) { +		mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; +		val  = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, +				  PCI_EXP_TYPE_ROOT_PORT); +	} else { +		mask = IMX6Q_GPR12_DEVICE_TYPE; +		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, +				  PCI_EXP_TYPE_ROOT_PORT); +	} + +	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); +} +  static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)  {  	struct dw_pcie *pci = imx6_pcie->pci; @@ -271,6 +297,134 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)  	return 0;  } +static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) +{ +	switch (imx6_pcie->drvdata->variant) { +	case IMX8MM: +		/* +		 * The PHY initialization had been done in the PHY +		 * driver, break here directly. +		 */ +		break; +	case IMX8MQ: +		/* +		 * TODO: Currently this code assumes external +		 * oscillator is being used +		 */ +		regmap_update_bits(imx6_pcie->iomuxc_gpr, +				   imx6_pcie_grp_offset(imx6_pcie), +				   IMX8MQ_GPR_PCIE_REF_USE_PAD, +				   IMX8MQ_GPR_PCIE_REF_USE_PAD); +		/* +		 * Regarding the datasheet, the PCIE_VPH is suggested +		 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the +		 * VREG_BYPASS should be cleared to zero. +		 */ +		if (imx6_pcie->vph && +		    regulator_get_voltage(imx6_pcie->vph) > 3000000) +			regmap_update_bits(imx6_pcie->iomuxc_gpr, +					   imx6_pcie_grp_offset(imx6_pcie), +					   IMX8MQ_GPR_PCIE_VREG_BYPASS, +					   0); +		break; +	case IMX7D: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); +		break; +	case IMX6SX: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX6SX_GPR12_PCIE_RX_EQ_MASK, +				   IMX6SX_GPR12_PCIE_RX_EQ_2); +		fallthrough; +	default: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + +		/* configure constant input signal to the pcie ctrl and phy */ +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4); + +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, +				   IMX6Q_GPR8_TX_DEEMPH_GEN1, +				   imx6_pcie->tx_deemph_gen1 << 0); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, +				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, +				   imx6_pcie->tx_deemph_gen2_3p5db << 6); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, +				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, +				   imx6_pcie->tx_deemph_gen2_6db << 12); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, +				   IMX6Q_GPR8_TX_SWING_FULL, +				   imx6_pcie->tx_swing_full << 18); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, +				   IMX6Q_GPR8_TX_SWING_LOW, +				   imx6_pcie->tx_swing_low << 25); +		break; +	} + +	imx6_pcie_configure_type(imx6_pcie); +} + +static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) +{ +	u32 val; +	struct device *dev = imx6_pcie->pci->dev; + +	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, +				     IOMUXC_GPR22, val, +				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, +				     PHY_PLL_LOCK_WAIT_USLEEP_MAX, +				     PHY_PLL_LOCK_WAIT_TIMEOUT)) +		dev_err(dev, "PCIe PLL lock timeout\n"); +} + +static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) +{ +	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); +	int mult, div; +	
u16 val; + +	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) +		return 0; + +	switch (phy_rate) { +	case 125000000: +		/* +		 * The default settings of the MPLL are for a 125MHz input +		 * clock, so no need to reconfigure anything in that case. +		 */ +		return 0; +	case 100000000: +		mult = 25; +		div = 0; +		break; +	case 200000000: +		mult = 25; +		div = 1; +		break; +	default: +		dev_err(imx6_pcie->pci->dev, +			"Unsupported PHY reference clock rate %lu\n", phy_rate); +		return -EINVAL; +	} + +	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); +	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << +		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT); +	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; +	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; +	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); + +	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val); +	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << +		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); +	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; +	val |= PCIE_PHY_ATEOVRD_EN; +	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val); + +	return 0; +} +  static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)  {  	u16 tmp; @@ -367,61 +521,6 @@ static int imx6_pcie_attach_pd(struct device *dev)  	return 0;  } -static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) -{ -	struct device *dev = imx6_pcie->pci->dev; - -	switch (imx6_pcie->drvdata->variant) { -	case IMX7D: -	case IMX8MQ: -		reset_control_assert(imx6_pcie->pciephy_reset); -		fallthrough; -	case IMX8MM: -		reset_control_assert(imx6_pcie->apps_reset); -		break; -	case IMX6SX: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, -				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN); -		/* Force PCIe PHY reset */ -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, -				   IMX6SX_GPR5_PCIE_BTNRST_RESET, -				   IMX6SX_GPR5_PCIE_BTNRST_RESET); -		break; -	case IMX6QP: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, -				   IMX6Q_GPR1_PCIE_SW_RST, -				   IMX6Q_GPR1_PCIE_SW_RST); -		break; -	case IMX6Q: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, -				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, -				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); -		break; -	} - -	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { -		int ret = regulator_disable(imx6_pcie->vpcie); - -		if (ret) -			dev_err(dev, "failed to disable vpcie regulator: %d\n", -				ret); -	} - -	/* Some boards don't have PCIe reset GPIO. */ -	if (gpio_is_valid(imx6_pcie->reset_gpio)) -		gpio_set_value_cansleep(imx6_pcie->reset_gpio, -					imx6_pcie->gpio_active_high); -} - -static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) -{ -	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ && -		imx6_pcie->drvdata->variant != IMX8MM); -	return imx6_pcie->controller_id == 1 ? 
IOMUXC_GPR16 : IOMUXC_GPR14; -} -  static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)  {  	struct dw_pcie *pci = imx6_pcie->pci; @@ -482,38 +581,44 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)  	return ret;  } -static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) +static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)  { -	u32 val; -	struct device *dev = imx6_pcie->pci->dev; - -	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, -				     IOMUXC_GPR22, val, -				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, -				     PHY_PLL_LOCK_WAIT_USLEEP_MAX, -				     PHY_PLL_LOCK_WAIT_TIMEOUT)) -		dev_err(dev, "PCIe PLL lock timeout\n"); +	switch (imx6_pcie->drvdata->variant) { +	case IMX6SX: +		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); +		break; +	case IMX6QP: +	case IMX6Q: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, +				IMX6Q_GPR1_PCIE_REF_CLK_EN, 0); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, +				IMX6Q_GPR1_PCIE_TEST_PD, +				IMX6Q_GPR1_PCIE_TEST_PD); +		break; +	case IMX7D: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, +				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); +		break; +	case IMX8MM: +	case IMX8MQ: +		clk_disable_unprepare(imx6_pcie->pcie_aux); +		break; +	default: +		break; +	}  } -static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) +static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)  {  	struct dw_pcie *pci = imx6_pcie->pci;  	struct device *dev = pci->dev;  	int ret; -	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { -		ret = regulator_enable(imx6_pcie->vpcie); -		if (ret) { -			dev_err(dev, "failed to enable vpcie regulator: %d\n", -				ret); -			return; -		} -	} -  	ret = clk_prepare_enable(imx6_pcie->pcie_phy);  	if (ret) {  		dev_err(dev, "unable to enable pcie_phy clock\n"); -		goto err_pcie_phy; +		return ret;  	}  	ret = clk_prepare_enable(imx6_pcie->pcie_bus); @@ -534,25 +639,75 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)  		goto err_ref_clk;  	} +	/* allow the clocks to stabilize */ +	usleep_range(200, 500); +	return 0; + +err_ref_clk: +	clk_disable_unprepare(imx6_pcie->pcie); +err_pcie: +	clk_disable_unprepare(imx6_pcie->pcie_bus); +err_pcie_bus: +	clk_disable_unprepare(imx6_pcie->pcie_phy); + +	return ret; +} + +static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) +{ +	imx6_pcie_disable_ref_clk(imx6_pcie); +	clk_disable_unprepare(imx6_pcie->pcie); +	clk_disable_unprepare(imx6_pcie->pcie_bus); +	clk_disable_unprepare(imx6_pcie->pcie_phy); +} + +static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) +{  	switch (imx6_pcie->drvdata->variant) { +	case IMX7D: +	case IMX8MQ: +		reset_control_assert(imx6_pcie->pciephy_reset); +		fallthrough;  	case IMX8MM: -		if (phy_power_on(imx6_pcie->phy)) -			dev_err(dev, "unable to power on PHY\n"); +		reset_control_assert(imx6_pcie->apps_reset);  		break; -	default: +	case IMX6SX: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, +				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN); +		/* Force PCIe PHY reset */ +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, +				   IMX6SX_GPR5_PCIE_BTNRST_RESET, +				   IMX6SX_GPR5_PCIE_BTNRST_RESET); +		break; +	case IMX6QP: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, +				   IMX6Q_GPR1_PCIE_SW_RST, +				   IMX6Q_GPR1_PCIE_SW_RST); +		break; +	case IMX6Q: +		
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, +				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, +				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);  		break;  	} -	/* allow the clocks to stabilize */ -	usleep_range(200, 500); + +	/* Some boards don't have PCIe reset GPIO. */ +	if (gpio_is_valid(imx6_pcie->reset_gpio)) +		gpio_set_value_cansleep(imx6_pcie->reset_gpio, +					imx6_pcie->gpio_active_high); +} + +static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) +{ +	struct dw_pcie *pci = imx6_pcie->pci; +	struct device *dev = pci->dev;  	switch (imx6_pcie->drvdata->variant) {  	case IMX8MQ:  		reset_control_deassert(imx6_pcie->pciephy_reset);  		break; -	case IMX8MM: -		if (phy_init(imx6_pcie->phy)) -			dev_err(dev, "waiting for phy ready timeout!\n"); -		break;  	case IMX7D:  		reset_control_deassert(imx6_pcie->pciephy_reset); @@ -588,6 +743,7 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)  		usleep_range(200, 500);  		break;  	case IMX6Q:		/* Nothing to do */ +	case IMX8MM:  		break;  	} @@ -600,153 +756,6 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)  		msleep(100);  	} -	return; - -err_ref_clk: -	clk_disable_unprepare(imx6_pcie->pcie); -err_pcie: -	clk_disable_unprepare(imx6_pcie->pcie_bus); -err_pcie_bus: -	clk_disable_unprepare(imx6_pcie->pcie_phy); -err_pcie_phy: -	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { -		ret = regulator_disable(imx6_pcie->vpcie); -		if (ret) -			dev_err(dev, "failed to disable vpcie regulator: %d\n", -				ret); -	} -} - -static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) -{ -	unsigned int mask, val; - -	if (imx6_pcie->drvdata->variant == IMX8MQ && -	    imx6_pcie->controller_id == 1) { -		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; -		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, -				    PCI_EXP_TYPE_ROOT_PORT); -	} else { -		mask = IMX6Q_GPR12_DEVICE_TYPE; -		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, -				  PCI_EXP_TYPE_ROOT_PORT); -	} - -	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); -} - -static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) -{ -	switch (imx6_pcie->drvdata->variant) { -	case IMX8MM: -		/* -		 * The PHY initialization had been done in the PHY -		 * driver, break here directly. -		 */ -		break; -	case IMX8MQ: -		/* -		 * TODO: Currently this code assumes external -		 * oscillator is being used -		 */ -		regmap_update_bits(imx6_pcie->iomuxc_gpr, -				   imx6_pcie_grp_offset(imx6_pcie), -				   IMX8MQ_GPR_PCIE_REF_USE_PAD, -				   IMX8MQ_GPR_PCIE_REF_USE_PAD); -		/* -		 * Regarding the datasheet, the PCIE_VPH is suggested -		 * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the -		 * VREG_BYPASS should be cleared to zero. 
-		 */ -		if (imx6_pcie->vph && -		    regulator_get_voltage(imx6_pcie->vph) > 3000000) -			regmap_update_bits(imx6_pcie->iomuxc_gpr, -					   imx6_pcie_grp_offset(imx6_pcie), -					   IMX8MQ_GPR_PCIE_VREG_BYPASS, -					   0); -		break; -	case IMX7D: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); -		break; -	case IMX6SX: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX6SX_GPR12_PCIE_RX_EQ_MASK, -				   IMX6SX_GPR12_PCIE_RX_EQ_2); -		fallthrough; -	default: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); - -		/* configure constant input signal to the pcie ctrl and phy */ -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4); - -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, -				   IMX6Q_GPR8_TX_DEEMPH_GEN1, -				   imx6_pcie->tx_deemph_gen1 << 0); -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, -				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, -				   imx6_pcie->tx_deemph_gen2_3p5db << 6); -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, -				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, -				   imx6_pcie->tx_deemph_gen2_6db << 12); -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, -				   IMX6Q_GPR8_TX_SWING_FULL, -				   imx6_pcie->tx_swing_full << 18); -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, -				   IMX6Q_GPR8_TX_SWING_LOW, -				   imx6_pcie->tx_swing_low << 25); -		break; -	} - -	imx6_pcie_configure_type(imx6_pcie); -} - -static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) -{ -	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); -	int mult, div; -	u16 val; - -	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) -		return 0; - -	switch (phy_rate) { -	case 125000000: -		/* -		 * The default settings of the MPLL are for a 125MHz input -		 * clock, so no need to reconfigure anything in that case. 
-		 */ -		return 0; -	case 100000000: -		mult = 25; -		div = 0; -		break; -	case 200000000: -		mult = 25; -		div = 1; -		break; -	default: -		dev_err(imx6_pcie->pci->dev, -			"Unsupported PHY reference clock rate %lu\n", phy_rate); -		return -EINVAL; -	} - -	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); -	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << -		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT); -	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; -	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; -	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); - -	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val); -	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << -		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); -	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; -	val |= PCIE_PHY_ATEOVRD_EN; -	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val); -  	return 0;  } @@ -789,6 +798,25 @@ static void imx6_pcie_ltssm_enable(struct device *dev)  	}  } +static void imx6_pcie_ltssm_disable(struct device *dev) +{ +	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + +	switch (imx6_pcie->drvdata->variant) { +	case IMX6Q: +	case IMX6SX: +	case IMX6QP: +		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, +				   IMX6Q_GPR12_PCIE_CTL_2, 0); +		break; +	case IMX7D: +	case IMX8MQ: +	case IMX8MM: +		reset_control_assert(imx6_pcie->apps_reset); +		break; +	} +} +  static int imx6_pcie_start_link(struct dw_pcie *pci)  {  	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); @@ -802,21 +830,26 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)  	 * started in Gen2 mode, there is a possibility the devices on the  	 * bus will not be detected at all.  This happens with PCIe switches.  	 */ +	dw_pcie_dbi_ro_wr_en(pci);  	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);  	tmp &= ~PCI_EXP_LNKCAP_SLS;  	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;  	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp); +	dw_pcie_dbi_ro_wr_dis(pci);  	/* Start LTSSM. */  	imx6_pcie_ltssm_enable(dev); -	dw_pcie_wait_for_link(pci); +	ret = dw_pcie_wait_for_link(pci); +	if (ret) +		goto err_reset_phy; -	if (pci->link_gen == 2) { -		/* Allow Gen2 mode after the link is up. */ +	if (pci->link_gen > 1) { +		/* Allow faster modes after the link is up */ +		dw_pcie_dbi_ro_wr_en(pci);  		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);  		tmp &= ~PCI_EXP_LNKCAP_SLS; -		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB; +		tmp |= pci->link_gen;  		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);  		/* @@ -826,6 +859,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)  		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);  		tmp |= PORT_LOGIC_SPEED_CHANGE;  		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); +		dw_pcie_dbi_ro_wr_dis(pci);  		if (imx6_pcie->drvdata->flags &  		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) { @@ -846,34 +880,110 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)  		}  		/* Make sure link training is finished as well! 
*/ -		dw_pcie_wait_for_link(pci); +		ret = dw_pcie_wait_for_link(pci); +		if (ret) +			goto err_reset_phy;  	} else { -		dev_info(dev, "Link: Gen2 disabled\n"); +		dev_info(dev, "Link: Only Gen1 is enabled\n");  	} +	imx6_pcie->link_is_up = true;  	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);  	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);  	return 0;  err_reset_phy: +	imx6_pcie->link_is_up = false;  	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",  		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),  		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));  	imx6_pcie_reset_phy(imx6_pcie); -	return ret; +	return 0; +} + +static void imx6_pcie_stop_link(struct dw_pcie *pci) +{ +	struct device *dev = pci->dev; + +	/* Turn off PCIe LTSSM */ +	imx6_pcie_ltssm_disable(dev);  } -static int imx6_pcie_host_init(struct pcie_port *pp) +static int imx6_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct device *dev = pci->dev;  	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); +	int ret; + +	if (imx6_pcie->vpcie) { +		ret = regulator_enable(imx6_pcie->vpcie); +		if (ret) { +			dev_err(dev, "failed to enable vpcie regulator: %d\n", +				ret); +			return ret; +		} +	}  	imx6_pcie_assert_core_reset(imx6_pcie);  	imx6_pcie_init_phy(imx6_pcie); -	imx6_pcie_deassert_core_reset(imx6_pcie); + +	ret = imx6_pcie_clk_enable(imx6_pcie); +	if (ret) { +		dev_err(dev, "unable to enable pcie clocks: %d\n", ret); +		goto err_reg_disable; +	} + +	if (imx6_pcie->phy) { +		ret = phy_power_on(imx6_pcie->phy); +		if (ret) { +			dev_err(dev, "pcie PHY power up failed\n"); +			goto err_clk_disable; +		} +	} + +	ret = imx6_pcie_deassert_core_reset(imx6_pcie); +	if (ret < 0) { +		dev_err(dev, "pcie deassert core reset failed: %d\n", ret); +		goto err_phy_off; +	} + +	if (imx6_pcie->phy) { +		ret = phy_init(imx6_pcie->phy); +		if (ret) { +			dev_err(dev, "waiting for PHY ready timeout!\n"); +			goto err_phy_off; +		} +	}  	imx6_setup_phy_mpll(imx6_pcie);  	return 0; + +err_phy_off: +	if (imx6_pcie->phy) +		phy_power_off(imx6_pcie->phy); +err_clk_disable: +	imx6_pcie_clk_disable(imx6_pcie); +err_reg_disable: +	if (imx6_pcie->vpcie) +		regulator_disable(imx6_pcie->vpcie); +	return ret; +} + +static void imx6_pcie_host_exit(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); + +	if (imx6_pcie->phy) { +		if (phy_power_off(imx6_pcie->phy)) +			dev_err(pci->dev, "unable to power off PHY\n"); +		phy_exit(imx6_pcie->phy); +	} +	imx6_pcie_clk_disable(imx6_pcie); + +	if (imx6_pcie->vpcie) +		regulator_disable(imx6_pcie->vpcie);  }  static const struct dw_pcie_host_ops imx6_pcie_host_ops = { @@ -884,26 +994,6 @@ static const struct dw_pcie_ops dw_pcie_ops = {  	.start_link = imx6_pcie_start_link,  }; -#ifdef CONFIG_PM_SLEEP -static void imx6_pcie_ltssm_disable(struct device *dev) -{ -	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - -	switch (imx6_pcie->drvdata->variant) { -	case IMX6SX: -	case IMX6QP: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX6Q_GPR12_PCIE_CTL_2, 0); -		break; -	case IMX7D: -	case IMX8MM: -		reset_control_assert(imx6_pcie->apps_reset); -		break; -	default: -		dev_err(dev, "ltssm_disable not supported\n"); -	} -} -  static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)  {  	struct device *dev = imx6_pcie->pci->dev; @@ -941,49 +1031,17 @@ pm_turnoff_sleep:  	usleep_range(1000, 10000);  } -static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) -{ 
-	clk_disable_unprepare(imx6_pcie->pcie); -	clk_disable_unprepare(imx6_pcie->pcie_phy); -	clk_disable_unprepare(imx6_pcie->pcie_bus); - -	switch (imx6_pcie->drvdata->variant) { -	case IMX6SX: -		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); -		break; -	case IMX7D: -		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, -				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, -				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); -		break; -	case IMX8MQ: -	case IMX8MM: -		clk_disable_unprepare(imx6_pcie->pcie_aux); -		break; -	default: -		break; -	} -} -  static int imx6_pcie_suspend_noirq(struct device *dev)  {  	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); +	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;  	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))  		return 0;  	imx6_pcie_pm_turnoff(imx6_pcie); -	imx6_pcie_ltssm_disable(dev); -	imx6_pcie_clk_disable(imx6_pcie); -	switch (imx6_pcie->drvdata->variant) { -	case IMX8MM: -		if (phy_power_off(imx6_pcie->phy)) -			dev_err(dev, "unable to power off PHY\n"); -		phy_exit(imx6_pcie->phy); -		break; -	default: -		break; -	} +	imx6_pcie_stop_link(imx6_pcie->pci); +	imx6_pcie_host_exit(pp);  	return 0;  } @@ -992,27 +1050,25 @@ static int imx6_pcie_resume_noirq(struct device *dev)  {  	int ret;  	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); -	struct pcie_port *pp = &imx6_pcie->pci->pp; +	struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;  	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))  		return 0; -	imx6_pcie_assert_core_reset(imx6_pcie); -	imx6_pcie_init_phy(imx6_pcie); -	imx6_pcie_deassert_core_reset(imx6_pcie); +	ret = imx6_pcie_host_init(pp); +	if (ret) +		return ret;  	dw_pcie_setup_rc(pp); -	ret = imx6_pcie_start_link(imx6_pcie->pci); -	if (ret < 0) -		dev_info(dev, "pcie link is down after resume.\n"); +	if (imx6_pcie->link_is_up) +		imx6_pcie_start_link(imx6_pcie->pci);  	return 0;  } -#endif  static const struct dev_pm_ops imx6_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq, -				      imx6_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq, +				  imx6_pcie_resume_noirq)  };  static int imx6_pcie_probe(struct platform_device *pdev) @@ -1291,7 +1347,7 @@ static struct platform_driver imx6_pcie_driver = {  static void imx6_pcie_quirk(struct pci_dev *dev)  {  	struct pci_bus *bus = dev->bus; -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	/* Bus parent is the PCI bridge, its parent is this platform driver */  	if (!bus->dev.parent || !bus->dev.parent->parent) diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index d10e5fd0f83c..78818853af9e 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -109,7 +109,7 @@ struct ks_pcie_of_data {  	enum dw_pcie_device_mode mode;  	const struct dw_pcie_host_ops *host_ops;  	const struct dw_pcie_ep_ops *ep_ops; -	unsigned int version; +	u32 version;  };  struct keystone_pcie { @@ -147,7 +147,7 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,  static void ks_pcie_msi_irq_ack(struct irq_data *data)  { -	struct pcie_port *pp  = irq_data_get_irq_chip_data(data); +	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(data);  	struct keystone_pcie *ks_pcie;  	u32 irq = data->hwirq;  	struct dw_pcie *pci; @@ -167,7 +167,7 @@ static void ks_pcie_msi_irq_ack(struct irq_data *data)  static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)  { -	struct pcie_port 
*pp = irq_data_get_irq_chip_data(data); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);  	struct keystone_pcie *ks_pcie;  	struct dw_pcie *pci;  	u64 msi_target; @@ -192,7 +192,7 @@ static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,  static void ks_pcie_msi_mask(struct irq_data *data)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(data); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);  	struct keystone_pcie *ks_pcie;  	u32 irq = data->hwirq;  	struct dw_pcie *pci; @@ -216,7 +216,7 @@ static void ks_pcie_msi_mask(struct irq_data *data)  static void ks_pcie_msi_unmask(struct irq_data *data)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(data); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);  	struct keystone_pcie *ks_pcie;  	u32 irq = data->hwirq;  	struct dw_pcie *pci; @@ -247,7 +247,7 @@ static struct irq_chip ks_pcie_msi_irq_chip = {  	.irq_unmask = ks_pcie_msi_unmask,  }; -static int ks_pcie_msi_host_init(struct pcie_port *pp) +static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)  {  	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;  	return dw_pcie_allocate_domains(pp); @@ -390,7 +390,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  	u32 val;  	u32 num_viewport = ks_pcie->num_viewport;  	struct dw_pcie *pci = ks_pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	u64 start, end;  	struct resource *mem;  	int i; @@ -428,7 +428,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,  					   unsigned int devfn, int where)  { -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);  	u32 reg; @@ -456,7 +456,7 @@ static struct pci_ops ks_child_pcie_ops = {   */  static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)  { -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); @@ -574,7 +574,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)  	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);  	u32 offset = irq - ks_pcie->msi_host_irq;  	struct dw_pcie *pci = ks_pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = pci->dev;  	struct irq_chip *chip = irq_desc_get_chip(desc);  	u32 vector, reg, pos; @@ -799,7 +799,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)  	return 0;  } -static int __init ks_pcie_host_init(struct pcie_port *pp) +static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); @@ -1069,19 +1069,19 @@ static int ks_pcie_am654_set_mode(struct device *dev,  static const struct ks_pcie_of_data ks_pcie_rc_of_data = {  	.host_ops = &ks_pcie_host_ops, -	.version = 0x365A, +	.version = DW_PCIE_VER_365A,  };  static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {  	.host_ops = &ks_pcie_am654_host_ops,  	.mode = DW_PCIE_RC_TYPE, -	.version = 0x490A, +	.version = DW_PCIE_VER_490A,  };  static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {  	.ep_ops = &ks_pcie_am654_ep_ops,  	.mode = DW_PCIE_EP_TYPE, -	.version = 0x490A, +	.version = DW_PCIE_VER_490A,  };  static const struct of_device_id ks_pcie_of_match[] = { @@ 
-1114,12 +1114,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  	struct device_link **link;  	struct gpio_desc *gpiod;  	struct resource *res; -	unsigned int version;  	void __iomem *base;  	u32 num_viewport;  	struct phy **phy;  	u32 num_lanes;  	char name[10]; +	u32 version;  	int ret;  	int irq;  	int i; @@ -1233,7 +1233,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  		goto err_get_sync;  	} -	if (pci->version >= 0x480A) +	if (dw_pcie_ver_is_ge(pci, 480A))  		ret = ks_pcie_am654_set_mode(dev, mode);  	else  		ret = ks_pcie_set_mode(dev); @@ -1324,7 +1324,7 @@ static struct platform_driver ks_pcie_driver __refdata = {  	.remove = __exit_p(ks_pcie_remove),  	.driver = {  		.name	= "keystone-pcie", -		.of_match_table = of_match_ptr(ks_pcie_of_match), +		.of_match_table = ks_pcie_of_match,  	},  };  builtin_platform_driver(ks_pcie_driver); diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 39f4664bd84c..ad99707b3b99 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -32,15 +32,6 @@ struct ls_pcie_ep {  	const struct ls_pcie_ep_drvdata *drvdata;  }; -static int ls_pcie_establish_link(struct dw_pcie *pci) -{ -	return 0; -} - -static const struct dw_pcie_ops dw_ls_pcie_ep_ops = { -	.start_link = ls_pcie_establish_link, -}; -  static const struct pci_epc_features*  ls_pcie_ep_get_features(struct dw_pcie_ep *ep)  { @@ -106,19 +97,16 @@ static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {  static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {  	.ops = &ls_pcie_ep_ops, -	.dw_pcie_ops = &dw_ls_pcie_ep_ops,  };  static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {  	.func_offset = 0x20000,  	.ops = &ls_pcie_ep_ops, -	.dw_pcie_ops = &dw_ls_pcie_ep_ops,  };  static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {  	.func_offset = 0x8000,  	.ops = &ls_pcie_ep_ops, -	.dw_pcie_ops = &dw_ls_pcie_ep_ops,  };  static const struct of_device_id ls_pcie_ep_of_match[] = { diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index 6a4f0619bb1c..879b8692f96a 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c @@ -74,7 +74,7 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)  	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);  } -static int ls_pcie_host_init(struct pcie_port *pp) +static int ls_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct ls_pcie *pcie = to_ls_pcie(pci); diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index f44bf347904a..c1527693bed9 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -370,7 +370,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)  	return 0;  } -static int meson_pcie_host_init(struct pcie_port *pp) +static int meson_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct meson_pcie *mp = to_meson_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index e8afa50129a8..b8cb77c9c4bd 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -217,7 +217,7 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,  static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,  					       
unsigned int devfn, int where)  { -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));  	unsigned int busnr = bus->number;  	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg; @@ -245,7 +245,7 @@ static struct pci_ops al_child_pci_ops = {  static void al_pcie_config_prepare(struct al_pcie *pcie)  {  	struct al_pcie_target_bus_cfg *target_bus_cfg; -	struct pcie_port *pp = &pcie->pci->pp; +	struct dw_pcie_rp *pp = &pcie->pci->pp;  	unsigned int ecam_bus_mask;  	u32 cfg_control_offset;  	u8 subordinate_bus; @@ -289,7 +289,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)  	al_pcie_controller_writel(pcie, cfg_control_offset, reg);  } -static int al_pcie_host_init(struct pcie_port *pp) +static int al_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct al_pcie *pcie = to_al_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 4e2552dcf982..dc469ef8e99b 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -166,7 +166,7 @@ static int armada8k_pcie_start_link(struct dw_pcie *pci)  	return 0;  } -static int armada8k_pcie_host_init(struct pcie_port *pp) +static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)  {  	u32 reg;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -233,7 +233,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,  				  struct platform_device *pdev)  {  	struct dw_pcie *pci = pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = &pdev->dev;  	int ret; @@ -343,7 +343,7 @@ static struct platform_driver armada8k_pcie_driver = {  	.probe		= armada8k_pcie_probe,  	.driver = {  		.name	= "armada8k-pcie", -		.of_match_table = of_match_ptr(armada8k_pcie_of_match), +		.of_match_table = armada8k_pcie_of_match,  		.suppress_bind_attrs = true,  	},  }; diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 2f15441770e1..98102079e26d 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -97,7 +97,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u  static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)  {  	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct dw_pcie_ep *ep = &pci->ep;  	switch (artpec6_pcie->mode) { @@ -315,7 +315,7 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)  	usleep_range(100, 200);  } -static int artpec6_pcie_host_init(struct pcie_port *pp) +static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 0eda8236c125..cf1627679716 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -154,9 +154,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	return 0;  } -static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, -				  enum pci_barno bar, dma_addr_t cpu_addr, -				  enum dw_pcie_as_type as_type) +static int 
dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type, +				  dma_addr_t cpu_addr, enum pci_barno bar)  {  	int ret;  	u32 free_win; @@ -168,8 +167,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,  		return -EINVAL;  	} -	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr, -				       as_type); +	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type, +				       cpu_addr, bar);  	if (ret < 0) {  		dev_err(pci->dev, "Failed to program IB window\n");  		return ret; @@ -185,8 +184,9 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,  				   phys_addr_t phys_addr,  				   u64 pci_addr, size_t size)  { -	u32 free_win;  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	u32 free_win; +	int ret;  	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);  	if (free_win >= pci->num_ob_windows) { @@ -194,8 +194,10 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,  		return -EINVAL;  	} -	dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM, -				     phys_addr, pci_addr, size); +	ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM, +					   phys_addr, pci_addr, size); +	if (ret) +		return ret;  	set_bit(free_win, ep->ob_window_map);  	ep->outbound_addr[free_win] = phys_addr; @@ -213,7 +215,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags); -	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); +	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);  	clear_bit(atu_index, ep->ib_window_map);  	ep->epf_bar[bar] = NULL;  } @@ -221,27 +223,25 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  			      struct pci_epf_bar *epf_bar)  { -	int ret;  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	enum pci_barno bar = epf_bar->barno;  	size_t size = epf_bar->size;  	int flags = epf_bar->flags; -	enum dw_pcie_as_type as_type; -	u32 reg;  	unsigned int func_offset = 0; +	int ret, type; +	u32 reg;  	func_offset = dw_pcie_ep_func_select(ep, func_no);  	reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;  	if (!(flags & PCI_BASE_ADDRESS_SPACE)) -		as_type = DW_PCIE_AS_MEM; +		type = PCIE_ATU_TYPE_MEM;  	else -		as_type = DW_PCIE_AS_IO; +		type = PCIE_ATU_TYPE_IO; -	ret = dw_pcie_ep_inbound_atu(ep, func_no, bar, -				     epf_bar->phys_addr, as_type); +	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);  	if (ret)  		return ret; @@ -289,7 +289,7 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,  	if (ret < 0)  		return; -	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); +	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);  	clear_bit(atu_index, ep->ob_window_map);  } @@ -435,8 +435,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	if (pci->ops && pci->ops->stop_link) -		pci->ops->stop_link(pci); +	dw_pcie_stop_link(pci);  }  static int dw_pcie_ep_start(struct pci_epc *epc) @@ -444,10 +443,7 @@ static int dw_pcie_ep_start(struct pci_epc *epc)  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	if (!pci->ops || !pci->ops->start_link) -		return -EINVAL; - -	return 
pci->ops->start_link(pci); +	return dw_pcie_start_link(pci);  }  static const struct pci_epc_features* @@ -699,17 +695,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	if (!pci->dbi_base2) {  		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); -		if (!res) +		if (!res) {  			pci->dbi_base2 = pci->dbi_base + SZ_4K; -		else { +		} else {  			pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);  			if (IS_ERR(pci->dbi_base2))  				return PTR_ERR(pci->dbi_base2);  		}  	} -	dw_pcie_iatu_detect(pci); -  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");  	if (!res)  		return -EINVAL; @@ -717,17 +711,17 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	ep->phys_base = res->start;  	ep->addr_size = resource_size(res); -	ep->ib_window_map = devm_kcalloc(dev, -					 BITS_TO_LONGS(pci->num_ib_windows), -					 sizeof(long), -					 GFP_KERNEL); +	dw_pcie_version_detect(pci); + +	dw_pcie_iatu_detect(pci); + +	ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows, +					       GFP_KERNEL);  	if (!ep->ib_window_map)  		return -ENOMEM; -	ep->ob_window_map = devm_kcalloc(dev, -					 BITS_TO_LONGS(pci->num_ob_windows), -					 sizeof(long), -					 GFP_KERNEL); +	ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows, +					       GFP_KERNEL);  	if (!ep->ob_window_map)  		return -ENOMEM; @@ -780,8 +774,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,  					     epc->mem->window.page_size);  	if (!ep->msi_mem) { +		ret = -ENOMEM;  		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); -		return -ENOMEM; +		goto err_exit_epc_mem;  	}  	if (ep->ops->get_features) { @@ -790,6 +785,19 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  			return 0;  	} -	return dw_pcie_ep_init_complete(ep); +	ret = dw_pcie_ep_init_complete(ep); +	if (ret) +		goto err_free_epc_mem; + +	return 0; + +err_free_epc_mem: +	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, +			      epc->mem->window.page_size); + +err_exit_epc_mem: +	pci_epc_mem_exit(epc); + +	return ret;  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_init); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 9979302532b7..7746f94a715f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -53,7 +53,7 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {  };  /* MSI int handler */ -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)  {  	int i, pos;  	unsigned long val; @@ -88,7 +88,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)  static void dw_chained_msi_isr(struct irq_desc *desc)  {  	struct irq_chip *chip = irq_desc_get_chip(desc); -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	chained_irq_enter(chip, desc); @@ -100,7 +100,7 @@ static void dw_chained_msi_isr(struct irq_desc *desc)  static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	u64 msi_target; @@ -123,7 +123,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,  static void dw_pci_bottom_mask(struct irq_data *d)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, 
ctrl;  	unsigned long flags; @@ -142,7 +142,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)  static void dw_pci_bottom_unmask(struct irq_data *d)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, ctrl;  	unsigned long flags; @@ -161,7 +161,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)  static void dw_pci_bottom_ack(struct irq_data *d)  { -	struct pcie_port *pp  = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, ctrl; @@ -185,7 +185,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,  				    unsigned int virq, unsigned int nr_irqs,  				    void *args)  { -	struct pcie_port *pp = domain->host_data; +	struct dw_pcie_rp *pp = domain->host_data;  	unsigned long flags;  	u32 i;  	int bit; @@ -213,7 +213,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,  				    unsigned int virq, unsigned int nr_irqs)  {  	struct irq_data *d = irq_domain_get_irq_data(domain, virq); -	struct pcie_port *pp = domain->host_data; +	struct dw_pcie_rp *pp = domain->host_data;  	unsigned long flags;  	raw_spin_lock_irqsave(&pp->lock, flags); @@ -229,7 +229,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {  	.free	= dw_pcie_irq_domain_free,  }; -int dw_pcie_allocate_domains(struct pcie_port *pp) +int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); @@ -255,10 +255,15 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)  	return 0;  } -static void dw_pcie_free_msi(struct pcie_port *pp) +static void dw_pcie_free_msi(struct dw_pcie_rp *pp)  { -	if (pp->msi_irq) -		irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL); +	u32 ctrl; + +	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { +		if (pp->msi_irq[ctrl] > 0) +			irq_set_chained_handler_and_data(pp->msi_irq[ctrl], +							 NULL, NULL); +	}  	irq_domain_remove(pp->msi_domain);  	irq_domain_remove(pp->irq_domain); @@ -267,12 +272,13 @@ static void dw_pcie_free_msi(struct pcie_port *pp)  		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  		struct device *dev = pci->dev; -		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg), -				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +		dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE); +		if (pp->msi_page) +			__free_page(pp->msi_page);  	}  } -static void dw_pcie_msi_init(struct pcie_port *pp) +static void dw_pcie_msi_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	u64 msi_target = (u64)pp->msi_data; @@ -285,7 +291,112 @@ static void dw_pcie_msi_init(struct pcie_port *pp)  	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));  } -int dw_pcie_host_init(struct pcie_port *pp) +static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct device *dev = pci->dev; +	struct platform_device *pdev = to_platform_device(dev); +	u32 ctrl, max_vectors; +	int irq; + +	/* Parse any "msiX" IRQs described in the devicetree */ +	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) { +		char msi_name[] = "msiX"; + +		msi_name[3] = '0' + ctrl; +		irq = platform_get_irq_byname_optional(pdev, msi_name); +		if (irq == -ENXIO) +			break; +		if (irq < 0) +			return dev_err_probe(dev, irq, +				
	     "Failed to parse MSI IRQ '%s'\n", +					     msi_name); + +		pp->msi_irq[ctrl] = irq; +	} + +	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */ +	if (ctrl == 0) +		return -ENXIO; + +	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL; +	if (pp->num_vectors > max_vectors) { +		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n", +			 max_vectors); +		pp->num_vectors = max_vectors; +	} +	if (!pp->num_vectors) +		pp->num_vectors = max_vectors; + +	return 0; +} + +static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct device *dev = pci->dev; +	struct platform_device *pdev = to_platform_device(dev); +	int ret; +	u32 ctrl, num_ctrls; + +	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) +		pp->irq_mask[ctrl] = ~0; + +	if (!pp->msi_irq[0]) { +		ret = dw_pcie_parse_split_msi_irq(pp); +		if (ret < 0 && ret != -ENXIO) +			return ret; +	} + +	if (!pp->num_vectors) +		pp->num_vectors = MSI_DEF_NUM_VECTORS; +	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + +	if (!pp->msi_irq[0]) { +		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi"); +		if (pp->msi_irq[0] < 0) { +			pp->msi_irq[0] = platform_get_irq(pdev, 0); +			if (pp->msi_irq[0] < 0) +				return pp->msi_irq[0]; +		} +	} + +	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors); + +	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; + +	ret = dw_pcie_allocate_domains(pp); +	if (ret) +		return ret; + +	for (ctrl = 0; ctrl < num_ctrls; ctrl++) { +		if (pp->msi_irq[ctrl] > 0) +			irq_set_chained_handler_and_data(pp->msi_irq[ctrl], +						    dw_chained_msi_isr, pp); +	} + +	ret = dma_set_mask(dev, DMA_BIT_MASK(32)); +	if (ret) +		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n"); + +	pp->msi_page = alloc_page(GFP_DMA32); +	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, +				    PAGE_SIZE, DMA_FROM_DEVICE); +	ret = dma_mapping_error(dev, pp->msi_data); +	if (ret) { +		dev_err(pci->dev, "Failed to map MSI data\n"); +		__free_page(pp->msi_page); +		pp->msi_page = NULL; +		pp->msi_data = 0; +		dw_pcie_free_msi(pp); + +		return ret; +	} + +	return 0; +} + +int dw_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct device *dev = pci->dev; @@ -293,17 +404,17 @@ int dw_pcie_host_init(struct pcie_port *pp)  	struct platform_device *pdev = to_platform_device(dev);  	struct resource_entry *win;  	struct pci_host_bridge *bridge; -	struct resource *cfg_res; +	struct resource *res;  	int ret; -	raw_spin_lock_init(&pci->pp.lock); +	raw_spin_lock_init(&pp->lock); -	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); -	if (cfg_res) { -		pp->cfg0_size = resource_size(cfg_res); -		pp->cfg0_base = cfg_res->start; +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); +	if (res) { +		pp->cfg0_size = resource_size(res); +		pp->cfg0_base = res->start; -		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res); +		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);  		if (IS_ERR(pp->va_cfg0_base))  			return PTR_ERR(pp->va_cfg0_base);  	} else { @@ -312,8 +423,8 @@ int dw_pcie_host_init(struct pcie_port *pp)  	}  	if (!pci->dbi_base) { -		struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); -		pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); +		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); +		pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);  		if 
(IS_ERR(pci->dbi_base))  			return PTR_ERR(pci->dbi_base);  	} @@ -350,67 +461,39 @@ int dw_pcie_host_init(struct pcie_port *pp)  				     of_property_read_bool(np, "msi-parent") ||  				     of_property_read_bool(np, "msi-map")); -		if (!pp->num_vectors) { +		/* +		 * For the has_msi_ctrl case the default assignment is handled +		 * in the dw_pcie_msi_host_init(). +		 */ +		if (!pp->has_msi_ctrl && !pp->num_vectors) {  			pp->num_vectors = MSI_DEF_NUM_VECTORS;  		} else if (pp->num_vectors > MAX_MSI_IRQS) {  			dev_err(dev, "Invalid number of vectors\n"); -			return -EINVAL; +			ret = -EINVAL; +			goto err_deinit_host;  		}  		if (pp->ops->msi_host_init) {  			ret = pp->ops->msi_host_init(pp);  			if (ret < 0) -				return ret; +				goto err_deinit_host;  		} else if (pp->has_msi_ctrl) { -			u32 ctrl, num_ctrls; - -			num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; -			for (ctrl = 0; ctrl < num_ctrls; ctrl++) -				pp->irq_mask[ctrl] = ~0; - -			if (!pp->msi_irq) { -				pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi"); -				if (pp->msi_irq < 0) { -					pp->msi_irq = platform_get_irq(pdev, 0); -					if (pp->msi_irq < 0) -						return pp->msi_irq; -				} -			} - -			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; - -			ret = dw_pcie_allocate_domains(pp); -			if (ret) -				return ret; - -			if (pp->msi_irq > 0) -				irq_set_chained_handler_and_data(pp->msi_irq, -							    dw_chained_msi_isr, -							    pp); - -			ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32)); -			if (ret) -				dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n"); - -			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg, -						      sizeof(pp->msi_msg), -						      DMA_FROM_DEVICE, -						      DMA_ATTR_SKIP_CPU_SYNC); -			ret = dma_mapping_error(pci->dev, pp->msi_data); -			if (ret) { -				dev_err(pci->dev, "Failed to map MSI data\n"); -				pp->msi_data = 0; -				goto err_free_msi; -			} +			ret = dw_pcie_msi_host_init(pp); +			if (ret < 0) +				goto err_deinit_host;  		}  	} +	dw_pcie_version_detect(pci); +  	dw_pcie_iatu_detect(pci); -	dw_pcie_setup_rc(pp); +	ret = dw_pcie_setup_rc(pp); +	if (ret) +		goto err_free_msi; -	if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) { -		ret = pci->ops->start_link(pci); +	if (!dw_pcie_link_up(pci)) { +		ret = dw_pcie_start_link(pci);  		if (ret)  			goto err_free_msi;  	} @@ -421,32 +504,50 @@ int dw_pcie_host_init(struct pcie_port *pp)  	bridge->sysdata = pp;  	ret = pci_host_probe(bridge); -	if (!ret) -		return 0; +	if (ret) +		goto err_stop_link; + +	return 0; + +err_stop_link: +	dw_pcie_stop_link(pci);  err_free_msi:  	if (pp->has_msi_ctrl)  		dw_pcie_free_msi(pp); + +err_deinit_host: +	if (pp->ops->host_deinit) +		pp->ops->host_deinit(pp); +  	return ret;  }  EXPORT_SYMBOL_GPL(dw_pcie_host_init); -void dw_pcie_host_deinit(struct pcie_port *pp) +void dw_pcie_host_deinit(struct dw_pcie_rp *pp)  { +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +  	pci_stop_root_bus(pp->bridge->bus);  	pci_remove_root_bus(pp->bridge->bus); + +	dw_pcie_stop_link(pci); +  	if (pp->has_msi_ctrl)  		dw_pcie_free_msi(pp); + +	if (pp->ops->host_deinit) +		pp->ops->host_deinit(pp);  }  EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);  static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  						unsigned int devfn, int where)  { -	int type; -	u32 busdev; -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	int type, ret; +	
u32 busdev;  	/*  	 * Checking whether the link is up here is a last line of defense @@ -467,8 +568,10 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  	else  		type = PCIE_ATU_TYPE_CFG1; - -	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size); +	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, +					pp->cfg0_size); +	if (ret) +		return NULL;  	return pp->va_cfg0_base + where;  } @@ -476,33 +579,45 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,  static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,  				 int where, int size, u32 *val)  { -	int ret; -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	int ret;  	ret = pci_generic_config_read(bus, devfn, where, size, val); +	if (ret != PCIBIOS_SUCCESSFUL) +		return ret; -	if (!ret && pci->io_cfg_atu_shared) -		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, -					  pp->io_bus_addr, pp->io_size); +	if (pp->cfg0_io_shared) { +		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, +						pp->io_base, pp->io_bus_addr, +						pp->io_size); +		if (ret) +			return PCIBIOS_SET_FAILED; +	} -	return ret; +	return PCIBIOS_SUCCESSFUL;  }  static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,  				 int where, int size, u32 val)  { -	int ret; -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	int ret;  	ret = pci_generic_config_write(bus, devfn, where, size, val); +	if (ret != PCIBIOS_SUCCESSFUL) +		return ret; -	if (!ret && pci->io_cfg_atu_shared) -		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base, -					  pp->io_bus_addr, pp->io_size); +	if (pp->cfg0_io_shared) { +		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, +						pp->io_base, pp->io_bus_addr, +						pp->io_size); +		if (ret) +			return PCIBIOS_SET_FAILED; +	} -	return ret; +	return PCIBIOS_SUCCESSFUL;  }  static struct pci_ops dw_child_pcie_ops = { @@ -513,7 +628,7 @@ static struct pci_ops dw_child_pcie_ops = {  void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)  { -	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie_rp *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	if (PCI_SLOT(devfn) > 0) @@ -529,11 +644,72 @@ static struct pci_ops dw_pcie_ops = {  	.write = pci_generic_config_write,  }; -void dw_pcie_setup_rc(struct pcie_port *pp) +static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)  { -	int i; -	u32 val, ctrl, num_ctrls;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct resource_entry *entry; +	int i, ret; + +	/* Note the very first outbound ATU is used for CFG IOs */ +	if (!pci->num_ob_windows) { +		dev_err(pci->dev, "No outbound iATU found\n"); +		return -EINVAL; +	} + +	/* +	 * Ensure all outbound windows are disabled before proceeding with +	 * the MEM/IO ranges setups. 
+	 */ +	for (i = 0; i < pci->num_ob_windows; i++) +		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i); + +	i = 0; +	resource_list_for_each_entry(entry, &pp->bridge->windows) { +		if (resource_type(entry->res) != IORESOURCE_MEM) +			continue; + +		if (pci->num_ob_windows <= ++i) +			break; + +		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM, +						entry->res->start, +						entry->res->start - entry->offset, +						resource_size(entry->res)); +		if (ret) { +			dev_err(pci->dev, "Failed to set MEM range %pr\n", +				entry->res); +			return ret; +		} +	} + +	if (pp->io_size) { +		if (pci->num_ob_windows > ++i) { +			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO, +							pp->io_base, +							pp->io_bus_addr, +							pp->io_size); +			if (ret) { +				dev_err(pci->dev, "Failed to set IO range %pr\n", +					entry->res); +				return ret; +			} +		} else { +			pp->cfg0_io_shared = true; +		} +	} + +	if (pci->num_ob_windows <= i) +		dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n", +			 pci->num_ob_windows); + +	return 0; +} + +int dw_pcie_setup_rc(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	u32 val, ctrl, num_ctrls; +	int ret;  	/*  	 * Enable DBI read-only registers for writing/updating configuration. @@ -582,45 +758,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)  		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;  	dw_pcie_writel_dbi(pci, PCI_COMMAND, val); -	/* Ensure all outbound windows are disabled so there are multiple matches */ -	for (i = 0; i < pci->num_ob_windows; i++) -		dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND); -  	/*  	 * If the platform provides its own child bus config accesses, it means  	 * the platform uses its own address translation component rather than  	 * ATU, so we should not program the ATU here.  	 
*/  	if (pp->bridge->child_ops == &dw_child_pcie_ops) { -		int atu_idx = 0; -		struct resource_entry *entry; - -		/* Get last memory resource entry */ -		resource_list_for_each_entry(entry, &pp->bridge->windows) { -			if (resource_type(entry->res) != IORESOURCE_MEM) -				continue; - -			if (pci->num_ob_windows <= ++atu_idx) -				break; - -			dw_pcie_prog_outbound_atu(pci, atu_idx, -						  PCIE_ATU_TYPE_MEM, entry->res->start, -						  entry->res->start - entry->offset, -						  resource_size(entry->res)); -		} - -		if (pp->io_size) { -			if (pci->num_ob_windows > ++atu_idx) -				dw_pcie_prog_outbound_atu(pci, atu_idx, -							  PCIE_ATU_TYPE_IO, pp->io_base, -							  pp->io_bus_addr, pp->io_size); -			else -				pci->io_cfg_atu_shared = true; -		} - -		if (pci->num_ob_windows <= atu_idx) -			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)", -				 pci->num_ob_windows); +		ret = dw_pcie_iatu_setup(pp); +		if (ret) +			return ret;  	}  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); @@ -633,5 +779,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)  	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);  	dw_pcie_dbi_ro_wr_dis(pci); + +	return 0;  }  EXPORT_SYMBOL_GPL(dw_pcie_setup_rc); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 0c5de87d3cc6..1fcfb840f238 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -17,13 +17,11 @@  #include <linux/platform_device.h>  #include <linux/resource.h>  #include <linux/types.h> -#include <linux/regmap.h>  #include "pcie-designware.h"  struct dw_plat_pcie {  	struct dw_pcie			*pci; -	struct regmap			*regmap;  	enum dw_pcie_device_mode	mode;  }; @@ -31,20 +29,9 @@ struct dw_plat_pcie_of_data {  	enum dw_pcie_device_mode	mode;  }; -static const struct of_device_id dw_plat_pcie_of_match[]; -  static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {  }; -static int dw_plat_pcie_establish_link(struct dw_pcie *pci) -{ -	return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { -	.start_link = dw_plat_pcie_establish_link, -}; -  static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -96,7 +83,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,  				 struct platform_device *pdev)  {  	struct dw_pcie *pci = dw_plat_pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = &pdev->dev;  	int ret; @@ -140,7 +127,6 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)  		return -ENOMEM;  	pci->dev = dev; -	pci->ops = &dw_pcie_ops;  	dw_plat_pcie->pci = pci;  	dw_plat_pcie->mode = mode; @@ -153,20 +139,21 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)  			return -ENODEV;  		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); -		if (ret < 0) -			return ret;  		break;  	case DW_PCIE_EP_TYPE:  		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))  			return -ENODEV;  		pci->ep.ops = &pcie_ep_ops; -		return dw_pcie_ep_init(&pci->ep); +		ret = dw_pcie_ep_init(&pci->ep); +		break;  	default:  		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); +		ret = -EINVAL; +		break;  	} -	return 0; +	return ret;  }  static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index d92c8a25094f..c6725c519a47 100644 --- 
a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -8,14 +8,41 @@   * Author: Jingoo Han <jg1.han@samsung.com>   */ +#include <linux/align.h> +#include <linux/bitops.h>  #include <linux/delay.h>  #include <linux/of.h>  #include <linux/of_platform.h> +#include <linux/sizes.h>  #include <linux/types.h>  #include "../../pci.h"  #include "pcie-designware.h" +void dw_pcie_version_detect(struct dw_pcie *pci) +{ +	u32 ver; + +	/* The content of the CSR is zero on DWC PCIe older than v4.70a */ +	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER); +	if (!ver) +		return; + +	if (pci->version && pci->version != ver) +		dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n", +			 pci->version, ver); +	else +		pci->version = ver; + +	ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE); + +	if (pci->type && pci->type != ver) +		dev_warn(pci->dev, "Types don't match (%08x != %08x)\n", +			 pci->type, ver); +	else +		pci->type = ver; +} +  /*   * These interfaces resemble the pci_find_*capability() interfaces, but these   * are for configuring host controllers, which are bridges *to* PCI devices but @@ -181,48 +208,61 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)  		dev_err(pci->dev, "write DBI address failed\n");  } -static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) +static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir, +					       u32 index)  { +	if (pci->iatu_unroll_enabled) +		return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index); + +	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index); +	return pci->atu_base; +} + +static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg) +{ +	void __iomem *base;  	int ret;  	u32 val; +	base = dw_pcie_select_atu(pci, dir, index); +  	if (pci->ops && pci->ops->read_dbi) -		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4); +		return pci->ops->read_dbi(pci, base, reg, 4); -	ret = dw_pcie_read(pci->atu_base + reg, 4, &val); +	ret = dw_pcie_read(base + reg, 4, &val);  	if (ret)  		dev_err(pci->dev, "Read ATU address failed\n");  	return val;  } -static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) +static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index, +			       u32 reg, u32 val)  { +	void __iomem *base;  	int ret; +	base = dw_pcie_select_atu(pci, dir, index); +  	if (pci->ops && pci->ops->write_dbi) { -		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val); +		pci->ops->write_dbi(pci, base, reg, 4, val);  		return;  	} -	ret = dw_pcie_write(pci->atu_base + reg, 4, val); +	ret = dw_pcie_write(base + reg, 4, val);  	if (ret)  		dev_err(pci->dev, "Write ATU address failed\n");  } -static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) +static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)  { -	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - -	return dw_pcie_readl_atu(pci, offset + reg); +	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);  } -static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, -				     u32 val) +static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg, +					 u32 val)  { -	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); - -	dw_pcie_writel_atu(pci, offset + reg, val); +	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);  }  static inline u32 dw_pcie_enable_ecrc(u32 val) @@ -266,264 +306,160 @@ static inline u32 dw_pcie_enable_ecrc(u32 val) 
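The hunk below folds the separate viewport and unroll code paths into a single __dw_pcie_prog_outbound_atu() helper that validates the window against region_align/region_limit and returns an error code instead of void. As a minimal caller-side sketch (illustrative only, not part of the patch; the index, addresses and size are invented), checking the new return value looks like this:

/*
 * Illustrative sketch, not part of this patch. It assumes the driver-internal
 * "pcie-designware.h" context and shows how a caller is expected to handle
 * the int return value of the reworked ATU helper. The window parameters
 * (index 0, 0x40000000, SZ_1M) are made-up example values.
 */
static int example_map_mem_window(struct dw_pcie *pci)
{
	int ret;

	/* Map a hypothetical 1 MiB CPU region 1:1 onto the PCI bus */
	ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
					0x40000000, 0x40000000, SZ_1M);
	if (ret) {
		/* -EINVAL: unaligned or oversized window, -ETIMEDOUT: enable timed out */
		dev_err(pci->dev, "outbound iATU setup failed: %d\n", ret);
		return ret;
	}

	return 0;
}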
 	return val | PCIE_ATU_TD;  } -static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no, -					     int index, int type, -					     u64 cpu_addr, u64 pci_addr, -					     u64 size) +static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, +				       int index, int type, u64 cpu_addr, +				       u64 pci_addr, u64 size)  {  	u32 retries, val; -	u64 limit_addr = cpu_addr + size - 1; - -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, -				 lower_32_bits(cpu_addr)); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, -				 upper_32_bits(cpu_addr)); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT, -				 lower_32_bits(limit_addr)); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT, -				 upper_32_bits(limit_addr)); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, -				 lower_32_bits(pci_addr)); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, -				 upper_32_bits(pci_addr)); -	val = type | PCIE_ATU_FUNC_NUM(func_no); -	val = upper_32_bits(size - 1) ? -		val | PCIE_ATU_INCREASE_REGION_SIZE : val; -	if (pci->version == 0x490A) -		val = dw_pcie_enable_ecrc(val); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val); -	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, -				 PCIE_ATU_ENABLE); +	u64 limit_addr; -	/* -	 * Make sure ATU enable takes effect before any subsequent config -	 * and I/O accesses. -	 */ -	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { -		val = dw_pcie_readl_ob_unroll(pci, index, -					      PCIE_ATU_UNR_REGION_CTRL2); -		if (val & PCIE_ATU_ENABLE) -			return; +	if (pci->ops && pci->ops->cpu_addr_fixup) +		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); -		mdelay(LINK_WAIT_IATU); +	limit_addr = cpu_addr + size - 1; + +	if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) || +	    !IS_ALIGNED(cpu_addr, pci->region_align) || +	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) { +		return -EINVAL;  	} -	dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} -static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, -					int index, int type, u64 cpu_addr, -					u64 pci_addr, u64 size) -{ -	u32 retries, val; +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE, +			      lower_32_bits(cpu_addr)); +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE, +			      upper_32_bits(cpu_addr)); -	if (pci->ops && pci->ops->cpu_addr_fixup) -		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT, +			      lower_32_bits(limit_addr)); +	if (dw_pcie_ver_is_ge(pci, 460A)) +		dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT, +				      upper_32_bits(limit_addr)); -	if (pci->iatu_unroll_enabled) { -		dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type, -						 cpu_addr, pci_addr, size); -		return; -	} +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET, +			      lower_32_bits(pci_addr)); +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET, +			      upper_32_bits(pci_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, -			   PCIE_ATU_REGION_OUTBOUND | index); -	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, -			   lower_32_bits(cpu_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, -			   upper_32_bits(cpu_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, -			   lower_32_bits(cpu_addr + size - 1)); -	if (pci->version >= 0x460A) -		dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT, -				   
upper_32_bits(cpu_addr + size - 1)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, -			   lower_32_bits(pci_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, -			   upper_32_bits(pci_addr));  	val = type | PCIE_ATU_FUNC_NUM(func_no); -	val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ? -		val | PCIE_ATU_INCREASE_REGION_SIZE : val; -	if (pci->version == 0x490A) +	if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) && +	    dw_pcie_ver_is_ge(pci, 460A)) +		val |= PCIE_ATU_INCREASE_REGION_SIZE; +	if (dw_pcie_ver_is(pci, 490A))  		val = dw_pcie_enable_ecrc(val); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val); + +	dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);  	/*  	 * Make sure ATU enable takes effect before any subsequent config  	 * and I/O accesses.  	 */  	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { -		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); +		val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);  		if (val & PCIE_ATU_ENABLE) -			return; +			return 0;  		mdelay(LINK_WAIT_IATU);  	} +  	dev_err(pci->dev, "Outbound iATU is not being enabled\n"); -} -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, -			       u64 cpu_addr, u64 pci_addr, u64 size) -{ -	__dw_pcie_prog_outbound_atu(pci, 0, index, type, -				    cpu_addr, pci_addr, size); +	return -ETIMEDOUT;  } -void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				  int type, u64 cpu_addr, u64 pci_addr, -				  u64 size) +int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, +			      u64 cpu_addr, u64 pci_addr, u64 size)  { -	__dw_pcie_prog_outbound_atu(pci, func_no, index, type, -				    cpu_addr, pci_addr, size); +	return __dw_pcie_prog_outbound_atu(pci, 0, index, type, +					   cpu_addr, pci_addr, size);  } -static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) +int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, +				 int type, u64 cpu_addr, u64 pci_addr, +				 u64 size)  { -	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - -	return dw_pcie_readl_atu(pci, offset + reg); +	return __dw_pcie_prog_outbound_atu(pci, func_no, index, type, +					   cpu_addr, pci_addr, size);  } -static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, -				     u32 val) +static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)  { -	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); - -	dw_pcie_writel_atu(pci, offset + reg, val); +	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);  } -static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no, -					   int index, int bar, u64 cpu_addr, -					   enum dw_pcie_as_type as_type) +static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg, +					 u32 val)  { -	int type; -	u32 retries, val; - -	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, -				 lower_32_bits(cpu_addr)); -	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, -				 upper_32_bits(cpu_addr)); - -	switch (as_type) { -	case DW_PCIE_AS_MEM: -		type = PCIE_ATU_TYPE_MEM; -		break; -	case DW_PCIE_AS_IO: -		type = PCIE_ATU_TYPE_IO; -		break; -	default: -		return -EINVAL; -	} - -	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type | -				 PCIE_ATU_FUNC_NUM(func_no)); -	
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, -				 PCIE_ATU_FUNC_NUM_MATCH_EN | -				 PCIE_ATU_ENABLE | -				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); - -	/* -	 * Make sure ATU enable takes effect before any subsequent config -	 * and I/O accesses. -	 */ -	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { -		val = dw_pcie_readl_ib_unroll(pci, index, -					      PCIE_ATU_UNR_REGION_CTRL2); -		if (val & PCIE_ATU_ENABLE) -			return 0; - -		mdelay(LINK_WAIT_IATU); -	} -	dev_err(pci->dev, "Inbound iATU is not being enabled\n"); - -	return -EBUSY; +	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);  }  int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, -			     int bar, u64 cpu_addr, -			     enum dw_pcie_as_type as_type) +			     int type, u64 cpu_addr, u8 bar)  { -	int type;  	u32 retries, val; -	if (pci->iatu_unroll_enabled) -		return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar, -						       cpu_addr, as_type); - -	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | -			   index); -	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); - -	switch (as_type) { -	case DW_PCIE_AS_MEM: -		type = PCIE_ATU_TYPE_MEM; -		break; -	case DW_PCIE_AS_IO: -		type = PCIE_ATU_TYPE_IO; -		break; -	default: +	if (!IS_ALIGNED(cpu_addr, pci->region_align))  		return -EINVAL; -	} -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | -			   PCIE_ATU_FUNC_NUM(func_no)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | -			   PCIE_ATU_FUNC_NUM_MATCH_EN | -			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); +	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET, +			      lower_32_bits(cpu_addr)); +	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET, +			      upper_32_bits(cpu_addr)); + +	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type | +			      PCIE_ATU_FUNC_NUM(func_no)); +	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, +			      PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN | +			      PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));  	/*  	 * Make sure ATU enable takes effect before any subsequent config  	 * and I/O accesses.  	 
*/  	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { -		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); +		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);  		if (val & PCIE_ATU_ENABLE)  			return 0;  		mdelay(LINK_WAIT_IATU);  	} +  	dev_err(pci->dev, "Inbound iATU is not being enabled\n"); -	return -EBUSY; +	return -ETIMEDOUT;  } -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, -			 enum dw_pcie_region_type type) +void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)  { -	int region; - -	switch (type) { -	case DW_PCIE_REGION_INBOUND: -		region = PCIE_ATU_REGION_INBOUND; -		break; -	case DW_PCIE_REGION_OUTBOUND: -		region = PCIE_ATU_REGION_OUTBOUND; -		break; -	default: -		return; -	} - -	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE); +	dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);  }  int dw_pcie_wait_for_link(struct dw_pcie *pci)  { +	u32 offset, val;  	int retries;  	/* Check if the link is up or not */  	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { -		if (dw_pcie_link_up(pci)) { -			dev_info(pci->dev, "Link up\n"); -			return 0; -		} +		if (dw_pcie_link_up(pci)) +			break; +  		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);  	} -	dev_info(pci->dev, "Phy link never came up\n"); +	if (retries >= LINK_WAIT_MAX_RETRIES) { +		dev_err(pci->dev, "Phy link never came up\n"); +		return -ETIMEDOUT; +	} -	return -ETIMEDOUT; +	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); +	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); + +	dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", +		 FIELD_GET(PCI_EXP_LNKSTA_CLS, val), +		 FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); + +	return 0;  }  EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); @@ -534,7 +470,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)  	if (pci->ops && pci->ops->link_up)  		return pci->ops->link_up(pci); -	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1); +	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);  	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&  		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));  } @@ -586,95 +522,81 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)  } -static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) +static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)  {  	u32 val;  	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);  	if (val == 0xffffffff) -		return 1; +		return true; -	return 0; +	return false;  } -static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci) +static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)  { -	int max_region, i, ob = 0, ib = 0; -	u32 val; +	int max_region, ob, ib; +	u32 val, min, dir; +	u64 max; -	max_region = min((int)pci->atu_size / 512, 256); - -	for (i = 0; i < max_region; i++) { -		dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET, -					0x11110000); +	if (pci->iatu_unroll_enabled) { +		max_region = min((int)pci->atu_size / 512, 256); +	} else { +		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); +		max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; +	} -		val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET); -		if (val == 0x11110000) -			ob++; -		else +	for (ob = 0; ob < max_region; ob++) { +		dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000); +		val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET); +		if (val != 0x11110000)  			break;  	} -	for (i = 0; i < max_region; i++) { -		dw_pcie_writel_ib_unroll(pci, i, 
PCIE_ATU_UNR_LOWER_TARGET, -					0x11110000); - -		val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET); -		if (val == 0x11110000) -			ib++; -		else +	for (ib = 0; ib < max_region; ib++) { +		dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000); +		val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET); +		if (val != 0x11110000)  			break;  	} -	pci->num_ib_windows = ib; -	pci->num_ob_windows = ob; -} - -static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci) -{ -	int max_region, i, ob = 0, ib = 0; -	u32 val; - -	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF); -	max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1; -	for (i = 0; i < max_region; i++) { -		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i); -		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000); -		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET); -		if (val == 0x11110000) -			ob++; -		else -			break; +	if (ob) { +		dir = PCIE_ATU_REGION_DIR_OB; +	} else if (ib) { +		dir = PCIE_ATU_REGION_DIR_IB; +	} else { +		dev_err(pci->dev, "No iATU regions found\n"); +		return;  	} -	for (i = 0; i < max_region; i++) { -		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i); -		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000); -		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET); -		if (val == 0x11110000) -			ib++; -		else -			break; +	dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0); +	min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT); + +	if (dw_pcie_ver_is_ge(pci, 460A)) { +		dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF); +		max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT); +	} else { +		max = 0;  	} -	pci->num_ib_windows = ib;  	pci->num_ob_windows = ob; +	pci->num_ib_windows = ib; +	pci->region_align = 1 << fls(min); +	pci->region_limit = (max << 32) | (SZ_4G - 1);  }  void dw_pcie_iatu_detect(struct dw_pcie *pci)  { -	struct device *dev = pci->dev; -	struct platform_device *pdev = to_platform_device(dev); +	struct platform_device *pdev = to_platform_device(pci->dev); -	if (pci->version >= 0x480A || (!pci->version && -				       dw_pcie_iatu_unroll_enabled(pci))) { -		pci->iatu_unroll_enabled = true; +	pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); +	if (pci->iatu_unroll_enabled) {  		if (!pci->atu_base) {  			struct resource *res =  				platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");  			if (res) {  				pci->atu_size = resource_size(res); -				pci->atu_base = devm_ioremap_resource(dev, res); +				pci->atu_base = devm_ioremap_resource(pci->dev, res);  			}  			if (!pci->atu_base || IS_ERR(pci->atu_base))  				pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; @@ -683,23 +605,25 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)  		if (!pci->atu_size)  			/* Pick a minimal default, enough for 8 in and 8 out windows */  			pci->atu_size = SZ_4K; +	} else { +		pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE; +		pci->atu_size = PCIE_ATU_VIEWPORT_SIZE; +	} -		dw_pcie_iatu_detect_regions_unroll(pci); -	} else -		dw_pcie_iatu_detect_regions(pci); +	dw_pcie_iatu_detect_regions(pci);  	dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?  		
"enabled" : "disabled"); -	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound", -		 pci->num_ob_windows, pci->num_ib_windows); +	dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n", +		 pci->num_ob_windows, pci->num_ib_windows, +		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);  }  void dw_pcie_setup(struct dw_pcie *pci)  { +	struct device_node *np = pci->dev->of_node;  	u32 val; -	struct device *dev = pci->dev; -	struct device_node *np = dev->of_node;  	if (pci->link_gen > 0)  		dw_pcie_link_set_max_speed(pci, pci->link_gen); @@ -726,6 +650,13 @@ void dw_pcie_setup(struct dw_pcie *pci)  	val |= PORT_LINK_DLL_LINK_EN;  	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); +	if (of_property_read_bool(np, "snps,enable-cdm-check")) { +		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); +		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | +		       PCIE_PL_CHK_REG_CHK_REG_START; +		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); +	} +  	of_property_read_u32(np, "num-lanes", &pci->num_lanes);  	if (!pci->num_lanes) {  		dev_dbg(pci->dev, "Using h/w default number of lanes\n"); @@ -772,11 +703,4 @@ void dw_pcie_setup(struct dw_pcie *pci)  		break;  	}  	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); - -	if (of_property_read_bool(np, "snps,enable-cdm-check")) { -		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); -		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS | -		       PCIE_PL_CHK_REG_CHK_REG_START; -		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); -	}  } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 7d6e9b7576be..09b887093a84 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -20,6 +20,29 @@  #include <linux/pci-epc.h>  #include <linux/pci-epf.h> +/* DWC PCIe IP-core versions (native support since v4.70a) */ +#define DW_PCIE_VER_365A		0x3336352a +#define DW_PCIE_VER_460A		0x3436302a +#define DW_PCIE_VER_470A		0x3437302a +#define DW_PCIE_VER_480A		0x3438302a +#define DW_PCIE_VER_490A		0x3439302a +#define DW_PCIE_VER_520A		0x3532302a + +#define __dw_pcie_ver_cmp(_pci, _ver, _op) \ +	((_pci)->version _op DW_PCIE_VER_ ## _ver) + +#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==) + +#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=) + +#define dw_pcie_ver_type_is(_pci, _ver, _type) \ +	(__dw_pcie_ver_cmp(_pci, _ver, ==) && \ +	 __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==)) + +#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \ +	(__dw_pcie_ver_cmp(_pci, _ver, ==) && \ +	 __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=)) +  /* Parameters for the waiting for link up routine */  #define LINK_WAIT_MAX_RETRIES		10  #define LINK_WAIT_USLEEP_MIN		90000 @@ -74,13 +97,34 @@  #define PCIE_MSI_INTR0_MASK		0x82C  #define PCIE_MSI_INTR0_STATUS		0x830 +#define GEN3_RELATED_OFF			0x890 +#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0) +#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS	BIT(13) +#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16) +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24 +#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24) +  #define PCIE_PORT_MULTI_LANE_CTRL	0x8C0  #define PORT_MLTI_UPCFG_SUPPORT		BIT(7) +#define PCIE_VERSION_NUMBER		0x8F8 +#define PCIE_VERSION_TYPE		0x8FC + +/* + * iATU inbound and outbound windows CSRs. 
Before the IP-core v4.80a each + * iATU region CSRs had been indirectly accessible by means of the dedicated + * viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe + * v4.80a in a way so the viewport was unrolled into the directly accessible + * iATU/eDMA CSRs space. + */  #define PCIE_ATU_VIEWPORT		0x900 -#define PCIE_ATU_REGION_INBOUND		BIT(31) -#define PCIE_ATU_REGION_OUTBOUND	0 -#define PCIE_ATU_CR1			0x904 +#define PCIE_ATU_REGION_DIR_IB		BIT(31) +#define PCIE_ATU_REGION_DIR_OB		0 +#define PCIE_ATU_VIEWPORT_BASE		0x904 +#define PCIE_ATU_UNROLL_BASE(dir, index) \ +	(((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0)) +#define PCIE_ATU_VIEWPORT_SIZE		0x2C +#define PCIE_ATU_REGION_CTRL1		0x000  #define PCIE_ATU_INCREASE_REGION_SIZE	BIT(13)  #define PCIE_ATU_TYPE_MEM		0x0  #define PCIE_ATU_TYPE_IO		0x2 @@ -88,19 +132,19 @@  #define PCIE_ATU_TYPE_CFG1		0x5  #define PCIE_ATU_TD			BIT(8)  #define PCIE_ATU_FUNC_NUM(pf)           ((pf) << 20) -#define PCIE_ATU_CR2			0x908 +#define PCIE_ATU_REGION_CTRL2		0x004  #define PCIE_ATU_ENABLE			BIT(31)  #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)  #define PCIE_ATU_FUNC_NUM_MATCH_EN      BIT(19) -#define PCIE_ATU_LOWER_BASE		0x90C -#define PCIE_ATU_UPPER_BASE		0x910 -#define PCIE_ATU_LIMIT			0x914 -#define PCIE_ATU_LOWER_TARGET		0x918 +#define PCIE_ATU_LOWER_BASE		0x008 +#define PCIE_ATU_UPPER_BASE		0x00C +#define PCIE_ATU_LIMIT			0x010 +#define PCIE_ATU_LOWER_TARGET		0x014  #define PCIE_ATU_BUS(x)			FIELD_PREP(GENMASK(31, 24), x)  #define PCIE_ATU_DEV(x)			FIELD_PREP(GENMASK(23, 19), x)  #define PCIE_ATU_FUNC(x)		FIELD_PREP(GENMASK(18, 16), x) -#define PCIE_ATU_UPPER_TARGET		0x91C -#define PCIE_ATU_UPPER_LIMIT		0x924 +#define PCIE_ATU_UPPER_TARGET		0x018 +#define PCIE_ATU_UPPER_LIMIT		0x020  #define PCIE_MISC_CONTROL_1_OFF		0x8BC  #define PCIE_DBI_RO_WR_EN		BIT(0) @@ -131,6 +175,25 @@  #define PCIE_ATU_UNR_UPPER_LIMIT	0x20  /* + * RAS-DES register definitions + */ +#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL	0x8 +#define EVENT_COUNTER_ALL_CLEAR		0x3 +#define EVENT_COUNTER_ENABLE_ALL	0x7 +#define EVENT_COUNTER_ENABLE_SHIFT	2 +#define EVENT_COUNTER_EVENT_SEL_MASK	GENMASK(7, 0) +#define EVENT_COUNTER_EVENT_SEL_SHIFT	16 +#define EVENT_COUNTER_EVENT_Tx_L0S	0x2 +#define EVENT_COUNTER_EVENT_Rx_L0S	0x3 +#define EVENT_COUNTER_EVENT_L1		0x5 +#define EVENT_COUNTER_EVENT_L1_1	0x7 +#define EVENT_COUNTER_EVENT_L1_2	0x8 +#define EVENT_COUNTER_GROUP_SEL_SHIFT	24 +#define EVENT_COUNTER_GROUP_5		0x5 + +#define PCIE_RAS_DES_EVENT_COUNTER_DATA		0xc + +/*   * The default address offset between dbi_base and atu_base. 
Root controller   * drivers are not required to initialize atu_base if the offset matches this   * default; the driver core automatically derives atu_base from dbi_base using @@ -138,13 +201,6 @@   */  #define DEFAULT_DBI_ATU_OFFSET (0x3 << 20) -/* Register address builder */ -#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ -		((region) << 9) - -#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ -		(((region) << 9) | BIT(8)) -  #define MAX_MSI_IRQS			256  #define MAX_MSI_IRQS_PER_CTRL		32  #define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) @@ -155,16 +211,10 @@  #define MAX_IATU_IN			256  #define MAX_IATU_OUT			256 -struct pcie_port;  struct dw_pcie; +struct dw_pcie_rp;  struct dw_pcie_ep; -enum dw_pcie_region_type { -	DW_PCIE_REGION_UNKNOWN, -	DW_PCIE_REGION_INBOUND, -	DW_PCIE_REGION_OUTBOUND, -}; -  enum dw_pcie_device_mode {  	DW_PCIE_UNKNOWN_TYPE,  	DW_PCIE_EP_TYPE, @@ -173,12 +223,14 @@ enum dw_pcie_device_mode {  };  struct dw_pcie_host_ops { -	int (*host_init)(struct pcie_port *pp); -	int (*msi_host_init)(struct pcie_port *pp); +	int (*host_init)(struct dw_pcie_rp *pp); +	void (*host_deinit)(struct dw_pcie_rp *pp); +	int (*msi_host_init)(struct dw_pcie_rp *pp);  }; -struct pcie_port { +struct dw_pcie_rp {  	bool			has_msi_ctrl:1; +	bool			cfg0_io_shared:1;  	u64			cfg0_base;  	void __iomem		*va_cfg0_base;  	u32			cfg0_size; @@ -187,11 +239,11 @@ struct pcie_port {  	u32			io_size;  	int			irq;  	const struct dw_pcie_host_ops *ops; -	int			msi_irq; +	int			msi_irq[MAX_MSI_CTRLS];  	struct irq_domain	*irq_domain;  	struct irq_domain	*msi_domain; -	u16			msi_msg;  	dma_addr_t		msi_data; +	struct page		*msi_page;  	struct irq_chip		*msi_irq_chip;  	u32			num_vectors;  	u32			irq_mask[MAX_MSI_CTRLS]; @@ -200,12 +252,6 @@ struct pcie_port {  	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);  }; -enum dw_pcie_as_type { -	DW_PCIE_AS_UNKNOWN, -	DW_PCIE_AS_MEM, -	DW_PCIE_AS_IO, -}; -  struct dw_pcie_ep_ops {  	void	(*ep_init)(struct dw_pcie_ep *ep);  	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, @@ -261,20 +307,21 @@ struct dw_pcie {  	struct device		*dev;  	void __iomem		*dbi_base;  	void __iomem		*dbi_base2; -	/* Used when iatu_unroll_enabled is true */  	void __iomem		*atu_base;  	size_t			atu_size;  	u32			num_ib_windows;  	u32			num_ob_windows; -	struct pcie_port	pp; +	u32			region_align; +	u64			region_limit; +	struct dw_pcie_rp	pp;  	struct dw_pcie_ep	ep;  	const struct dw_pcie_ops *ops; -	unsigned int		version; +	u32			version; +	u32			type;  	int			num_lanes;  	int			link_gen;  	u8			n_fts[2];  	bool			iatu_unroll_enabled: 1; -	bool			io_cfg_atu_shared: 1;  };  #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -282,6 +329,8 @@ struct dw_pcie {  #define to_dw_pcie_from_ep(endpoint)   \  		container_of((endpoint), struct dw_pcie, ep) +void dw_pcie_version_detect(struct dw_pcie *pci); +  u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);  u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); @@ -294,17 +343,13 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);  int dw_pcie_link_up(struct dw_pcie *pci);  void dw_pcie_upconfig_setup(struct dw_pcie *pci);  int dw_pcie_wait_for_link(struct dw_pcie *pci); -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, -			       int type, u64 cpu_addr, u64 pci_addr, -			       u64 size); -void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, -				  int type, u64 cpu_addr, u64 pci_addr, -				  u64 size); +int 
dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, +			      u64 cpu_addr, u64 pci_addr, u64 size); +int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, +				 int type, u64 cpu_addr, u64 pci_addr, u64 size);  int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, -			     int bar, u64 cpu_addr, -			     enum dw_pcie_as_type as_type); -void dw_pcie_disable_atu(struct dw_pcie *pci, int index, -			 enum dw_pcie_region_type type); +			     int type, u64 cpu_addr, u8 bar); +void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);  void dw_pcie_setup(struct dw_pcie *pci);  void dw_pcie_iatu_detect(struct dw_pcie *pci); @@ -365,34 +410,49 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)  	dw_pcie_writel_dbi(pci, reg, val);  } +static inline int dw_pcie_start_link(struct dw_pcie *pci) +{ +	if (pci->ops && pci->ops->start_link) +		return pci->ops->start_link(pci); + +	return 0; +} + +static inline void dw_pcie_stop_link(struct dw_pcie *pci) +{ +	if (pci->ops && pci->ops->stop_link) +		pci->ops->stop_link(pci); +} +  #ifdef CONFIG_PCIE_DW_HOST -irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); -void dw_pcie_setup_rc(struct pcie_port *pp); -int dw_pcie_host_init(struct pcie_port *pp); -void dw_pcie_host_deinit(struct pcie_port *pp); -int dw_pcie_allocate_domains(struct pcie_port *pp); +irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); +int dw_pcie_setup_rc(struct dw_pcie_rp *pp); +int dw_pcie_host_init(struct dw_pcie_rp *pp); +void dw_pcie_host_deinit(struct dw_pcie_rp *pp); +int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);  void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,  				       int where);  #else -static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) +static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)  {  	return IRQ_NONE;  } -static inline void dw_pcie_setup_rc(struct pcie_port *pp) +static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)  { +	return 0;  } -static inline int dw_pcie_host_init(struct pcie_port *pp) +static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)  {  	return 0;  } -static inline void dw_pcie_host_deinit(struct pcie_port *pp) +static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)  {  } -static inline int dw_pcie_allocate_domains(struct pcie_port *pp) +static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)  {  	return 0;  } diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 8c5bb9d7cc36..c1e7653e508e 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -186,7 +186,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)  	return 0;  } -static int rockchip_pcie_host_init(struct pcie_port *pp) +static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); @@ -288,7 +288,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)  {  	struct device *dev = &pdev->dev;  	struct rockchip_pcie *rockchip; -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	int ret;  	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c index 02cc70d8cc06..0c90583c078b 100644 --- a/drivers/pci/controller/dwc/pcie-fu740.c +++ b/drivers/pci/controller/dwc/pcie-fu740.c @@ -16,11 +16,9 @@  
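The pcie-fu740.c hunks below are part of the tree-wide rename of struct pcie_port to struct dw_pcie_rp and only touch includes and the host_init prototype. A hedged sketch of what a converted host driver callback looks like after the rename (the example names and body are invented):

/*
 * Illustrative sketch, not part of this patch. After the rename, host_init
 * callbacks take struct dw_pcie_rp * instead of the old struct pcie_port *.
 * The body below is a made-up example.
 */
static int example_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Platform-specific init (clocks, resets, PHY, ...) would go here */
	dev_dbg(pci->dev, "host bridge initialized\n");

	return 0;
}

static const struct dw_pcie_host_ops example_pcie_host_ops = {
	.host_init = example_pcie_host_init,
};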
#include <linux/gpio.h>  #include <linux/gpio/consumer.h>  #include <linux/kernel.h> -#include <linux/mfd/syscon.h>  #include <linux/module.h>  #include <linux/pci.h>  #include <linux/platform_device.h> -#include <linux/regulator/consumer.h>  #include <linux/resource.h>  #include <linux/types.h>  #include <linux/interrupt.h> @@ -236,7 +234,7 @@ err:  	return ret;  } -static int fu740_pcie_host_init(struct pcie_port *pp) +static int fu740_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct fu740_pcie *afp = to_fu740_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 410555dccb6d..e2b80f10030d 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)  	writel(val, histb_pcie->ctrl + reg);  } -static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) +static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)  	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);  } -static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) +static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -180,7 +180,7 @@ static int histb_pcie_start_link(struct dw_pcie *pci)  	return 0;  } -static int histb_pcie_host_init(struct pcie_port *pp) +static int histb_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -219,7 +219,7 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)  		regulator_disable(hipcie->vpcie);  } -static int histb_pcie_host_enable(struct pcie_port *pp) +static int histb_pcie_host_enable(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct histb_pcie *hipcie = to_histb_pcie(pci); @@ -297,7 +297,7 @@ static int histb_pcie_probe(struct platform_device *pdev)  {  	struct histb_pcie *hipcie;  	struct dw_pcie *pci; -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	struct device_node *np = pdev->dev.of_node;  	struct device *dev = &pdev->dev;  	enum of_gpio_flags of_flags; diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 5ba144924ff8..333c33d98a70 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -58,10 +58,6 @@  #define BUS_IATU_OFFSET			SZ_256M  #define RESET_INTERVAL_MS		100 -struct intel_pcie_soc { -	unsigned int	pcie_ver; -}; -  struct intel_pcie {  	struct dw_pcie		pci;  	void __iomem		*app_base; @@ -306,7 +302,11 @@ static int intel_pcie_host_setup(struct intel_pcie *pcie)  	intel_pcie_ltssm_disable(pcie);  	intel_pcie_link_setup(pcie);  	intel_pcie_init_n_fts(pci); -	dw_pcie_setup_rc(&pci->pp); + +	ret = dw_pcie_setup_rc(&pci->pp); +	if (ret) +		goto app_init_err; +  	dw_pcie_upconfig_setup(pci);  	intel_pcie_device_rst_deassert(pcie); @@ -343,7 +343,7 @@ static void __intel_pcie_remove(struct intel_pcie *pcie)  static int intel_pcie_remove(struct platform_device *pdev)  {  	struct intel_pcie *pcie = platform_get_drvdata(pdev); -	struct pcie_port *pp = &pcie->pci.pp; +	struct 
dw_pcie_rp *pp = &pcie->pci.pp;  	dw_pcie_host_deinit(pp);  	__intel_pcie_remove(pcie); @@ -351,7 +351,7 @@ static int intel_pcie_remove(struct platform_device *pdev)  	return 0;  } -static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev) +static int intel_pcie_suspend_noirq(struct device *dev)  {  	struct intel_pcie *pcie = dev_get_drvdata(dev);  	int ret; @@ -366,14 +366,14 @@ static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)  	return ret;  } -static int __maybe_unused intel_pcie_resume_noirq(struct device *dev) +static int intel_pcie_resume_noirq(struct device *dev)  {  	struct intel_pcie *pcie = dev_get_drvdata(dev);  	return intel_pcie_host_setup(pcie);  } -static int intel_pcie_rc_init(struct pcie_port *pp) +static int intel_pcie_rc_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct intel_pcie *pcie = dev_get_drvdata(pci->dev); @@ -394,16 +394,11 @@ static const struct dw_pcie_host_ops intel_pcie_dw_ops = {  	.host_init =		intel_pcie_rc_init,  }; -static const struct intel_pcie_soc pcie_data = { -	.pcie_ver =		0x520A, -}; -  static int intel_pcie_probe(struct platform_device *pdev)  { -	const struct intel_pcie_soc *data;  	struct device *dev = &pdev->dev;  	struct intel_pcie *pcie; -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	struct dw_pcie *pci;  	int ret; @@ -424,12 +419,7 @@ static int intel_pcie_probe(struct platform_device *pdev)  	if (ret)  		return ret; -	data = device_get_match_data(dev); -	if (!data) -		return -ENODEV; -  	pci->ops = &intel_pcie_ops; -	pci->version = data->pcie_ver;  	pp->ops = &intel_pcie_dw_ops;  	ret = dw_pcie_host_init(pp); @@ -442,12 +432,12 @@ static int intel_pcie_probe(struct platform_device *pdev)  }  static const struct dev_pm_ops intel_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq, -				      intel_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq, +				  intel_pcie_resume_noirq)  };  static const struct of_device_id of_intel_pcie_match[] = { -	{ .compatible = "intel,lgm-pcie", .data = &pcie_data }, +	{ .compatible = "intel,lgm-pcie" },  	{}  }; diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index 1ac29a6eef22..f90f36bac018 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -231,7 +231,7 @@ static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)  	struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);  	struct irq_chip *chip = irq_desc_get_chip(desc);  	u32 val, mask, status; -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	/*  	 * Keem Bay PCIe Controller provides an additional IP logic on top of @@ -332,13 +332,13 @@ static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,  				      struct platform_device *pdev)  {  	struct dw_pcie *pci = &pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = &pdev->dev;  	u32 val;  	int ret;  	pp->ops = &keembay_pcie_host_ops; -	pp->msi_irq = -ENODEV; +	pp->msi_irq[0] = -ENODEV;  	ret = keembay_pcie_setup_msi_irq(pcie);  	if (ret) diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index a52cad269f85..7f67aad71df4 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -620,7 +620,7 @@ static int kirin_pcie_start_link(struct dw_pcie *pci)  	return 0;  } -static int kirin_pcie_host_init(struct pcie_port *pp) +static int 
kirin_pcie_host_init(struct dw_pcie_rp *pp)  {  	pp->bridge->ops = &kirin_pci_ops; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 2ea13750b492..66886dc6e777 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -41,6 +41,9 @@  #define L23_CLK_RMV_DIS				BIT(2)  #define L1_CLK_RMV_DIS				BIT(1) +#define PCIE20_PARF_PM_CTRL			0x20 +#define REQ_NOT_ENTR_L1				BIT(5) +  #define PCIE20_PARF_PHY_CTRL			0x40  #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)  #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16) @@ -52,6 +55,10 @@  #define PCIE20_PARF_DBI_BASE_ADDR		0x168  #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C  #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174 +#define AHB_CLK_EN				BIT(0) +#define MSTR_AXI_CLK_EN				BIT(1) +#define BYPASS					BIT(4) +  #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178  #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8  #define PCIE20_PARF_LTSSM			0x1B0 @@ -69,7 +76,20 @@  #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c  #define CFG_BRIDGE_SB_INIT			BIT(0) -#define PCIE_CAP_LINK1_VAL			0x2FD7F +#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \ +						250) +#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \ +						1) +#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \ +						PCI_EXP_SLTCAP_PCP | \ +						PCI_EXP_SLTCAP_MRLSP | \ +						PCI_EXP_SLTCAP_AIP | \ +						PCI_EXP_SLTCAP_PIP | \ +						PCI_EXP_SLTCAP_HPS | \ +						PCI_EXP_SLTCAP_HPC | \ +						PCI_EXP_SLTCAP_EIP | \ +						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ +						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)  #define PCIE20_PARF_Q2A_FLUSH			0x1AC @@ -128,7 +148,6 @@ struct qcom_pcie_resources_2_3_2 {  	struct clk *master_clk;  	struct clk *slave_clk;  	struct clk *cfg_clk; -	struct clk *pipe_clk;  	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];  }; @@ -165,10 +184,11 @@ struct qcom_pcie_resources_2_7_0 {  	int num_clks;  	struct regulator_bulk_data supplies[2];  	struct reset_control *pci_reset; -	struct clk *pipe_clk; -	struct clk *pipe_clk_src; -	struct clk *phy_pipe_clk; -	struct clk *ref_clk_src; +}; + +struct qcom_pcie_resources_2_9_0 { +	struct clk_bulk_data clks[5]; +	struct reset_control *rst;  };  union qcom_pcie_resources { @@ -178,6 +198,7 @@ union qcom_pcie_resources {  	struct qcom_pcie_resources_2_3_3 v2_3_3;  	struct qcom_pcie_resources_2_4_0 v2_4_0;  	struct qcom_pcie_resources_2_7_0 v2_7_0; +	struct qcom_pcie_resources_2_9_0 v2_9_0;  };  struct qcom_pcie; @@ -194,7 +215,6 @@ struct qcom_pcie_ops {  struct qcom_pcie_cfg {  	const struct qcom_pcie_ops *ops; -	unsigned int pipe_clk_need_muxing:1;  	unsigned int has_tbu_clk:1;  	unsigned int has_ddrss_sf_tbu_clk:1;  	unsigned int has_aggre0_clk:1; @@ -325,8 +345,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	struct device_node *node = dev->of_node; -	u32 val;  	int ret;  	/* reset the PCIe interface as uboot can leave it undefined state */ @@ -337,8 +355,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	reset_control_assert(res->ext_reset);  	reset_control_assert(res->phy_reset); -	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); -  	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);  	if (ret < 0) {  		dev_err(dev, "cannot enable regulators\n"); @@ -381,15 +397,42 @@ static int qcom_pcie_init_2_1_0(struct 
qcom_pcie *pcie)  		goto err_deassert_axi;  	} -	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); -	if (ret) -		goto err_clks; +	return 0; + +err_deassert_axi: +	reset_control_assert(res->por_reset); +err_deassert_por: +	reset_control_assert(res->pci_reset); +err_deassert_pci: +	reset_control_assert(res->phy_reset); +err_deassert_phy: +	reset_control_assert(res->ext_reset); +err_deassert_ext: +	reset_control_assert(res->ahb_reset); +err_deassert_ahb: +	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + +	return ret; +} + +static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) +{ +	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; +	struct dw_pcie *pci = pcie->pci; +	struct device *dev = pci->dev; +	struct device_node *node = dev->of_node; +	u32 val; +	int ret;  	/* enable PCIe clocks and resets */  	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);  	val &= ~BIT(0);  	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); +	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +	if (ret) +		return ret; +  	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||  	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {  		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | @@ -428,23 +471,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);  	return 0; - -err_clks: -	reset_control_assert(res->axi_reset); -err_deassert_axi: -	reset_control_assert(res->por_reset); -err_deassert_por: -	reset_control_assert(res->pci_reset); -err_deassert_pci: -	reset_control_assert(res->phy_reset); -err_deassert_phy: -	reset_control_assert(res->ext_reset); -err_deassert_ext: -	reset_control_assert(res->ahb_reset); -err_deassert_ahb: -	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - -	return ret;  }  static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) @@ -532,16 +558,6 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)  		goto err_slave;  	} -	/* change DBI base address */ -	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); - -	if (IS_ENABLED(CONFIG_PCI_MSI)) { -		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); - -		val |= BIT(31); -		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); -	} -  	return 0;  err_slave:  	clk_disable_unprepare(res->slave_bus); @@ -557,6 +573,21 @@ err_res:  	return ret;  } +static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) +{ +	/* change DBI base address */ +	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + +	if (IS_ENABLED(CONFIG_PCI_MSI)) { +		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + +		val |= BIT(31); +		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); +	} + +	return 0; +} +  static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)  {  	u32 val; @@ -597,8 +628,7 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)  	if (IS_ERR(res->slave_clk))  		return PTR_ERR(res->slave_clk); -	res->pipe_clk = devm_clk_get(dev, "pipe"); -	return PTR_ERR_OR_ZERO(res->pipe_clk); +	return 0;  }  static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) @@ -613,19 +643,11 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } -static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) -{ -	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; - -	clk_disable_unprepare(res->pipe_clk); -} -  static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)  {  	struct 
qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	u32 val;  	int ret;  	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); @@ -658,6 +680,25 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)  		goto err_slave_clk;  	} +	return 0; + +err_slave_clk: +	clk_disable_unprepare(res->master_clk); +err_master_clk: +	clk_disable_unprepare(res->cfg_clk); +err_cfg_clk: +	clk_disable_unprepare(res->aux_clk); + +err_aux_clk: +	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); + +	return ret; +} + +static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) +{ +	u32 val; +  	/* enable PCIe clocks and resets */  	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);  	val &= ~BIT(0); @@ -680,34 +721,6 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)  	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);  	return 0; - -err_slave_clk: -	clk_disable_unprepare(res->master_clk); -err_master_clk: -	clk_disable_unprepare(res->cfg_clk); -err_cfg_clk: -	clk_disable_unprepare(res->aux_clk); - -err_aux_clk: -	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); - -	return ret; -} - -static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) -{ -	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; -	struct dw_pcie *pci = pcie->pci; -	struct device *dev = pci->dev; -	int ret; - -	ret = clk_prepare_enable(res->pipe_clk); -	if (ret) { -		dev_err(dev, "cannot prepare/enable pipe clock\n"); -		return ret; -	} - -	return 0;  }  static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) @@ -814,7 +827,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	u32 val;  	int ret;  	ret = reset_control_assert(res->axi_m_reset); @@ -939,6 +951,33 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)  	if (ret)  		goto err_clks; +	return 0; + +err_clks: +	reset_control_assert(res->ahb_reset); +err_rst_ahb: +	reset_control_assert(res->pwr_reset); +err_rst_pwr: +	reset_control_assert(res->axi_s_reset); +err_rst_axi_s: +	reset_control_assert(res->axi_m_sticky_reset); +err_rst_axi_m_sticky: +	reset_control_assert(res->axi_m_reset); +err_rst_axi_m: +	reset_control_assert(res->pipe_sticky_reset); +err_rst_pipe_sticky: +	reset_control_assert(res->pipe_reset); +err_rst_pipe: +	reset_control_assert(res->phy_reset); +err_rst_phy: +	reset_control_assert(res->phy_ahb_reset); +	return ret; +} + +static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) +{ +	u32 val; +  	/* enable PCIe clocks and resets */  	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);  	val &= ~BIT(0); @@ -961,26 +1000,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)  	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);  	return 0; - -err_clks: -	reset_control_assert(res->ahb_reset); -err_rst_ahb: -	reset_control_assert(res->pwr_reset); -err_rst_pwr: -	reset_control_assert(res->axi_s_reset); -err_rst_axi_s: -	reset_control_assert(res->axi_m_sticky_reset); -err_rst_axi_m_sticky: -	reset_control_assert(res->axi_m_reset); -err_rst_axi_m: -	reset_control_assert(res->pipe_sticky_reset); -err_rst_pipe_sticky: -	reset_control_assert(res->pipe_reset); -err_rst_pipe: -	reset_control_assert(res->phy_reset); -err_rst_phy: -	reset_control_assert(res->phy_ahb_reset); -	return ret;  }  static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) @@ -1038,9 
+1057,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; -	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	int i, ret; -	u32 val;  	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {  		ret = reset_control_assert(res->rst[i]); @@ -1097,6 +1114,33 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  		goto err_clk_aux;  	} +	return 0; + +err_clk_aux: +	clk_disable_unprepare(res->ahb_clk); +err_clk_ahb: +	clk_disable_unprepare(res->axi_s_clk); +err_clk_axi_s: +	clk_disable_unprepare(res->axi_m_clk); +err_clk_axi_m: +	clk_disable_unprepare(res->iface); +err_clk_iface: +	/* +	 * Not checking for failure, will anyway return +	 * the original failure in 'ret'. +	 */ +	for (i = 0; i < ARRAY_SIZE(res->rst); i++) +		reset_control_assert(res->rst[i]); + +	return ret; +} + +static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) +{ +	struct dw_pcie *pci = pcie->pci; +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); +	u32 val; +  	writel(SLV_ADDR_SPACE_SZ,  		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); @@ -1114,7 +1158,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);  	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); -	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); +	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);  	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);  	val &= ~PCI_EXP_LNKCAP_ASPMS; @@ -1124,24 +1168,6 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  		PCI_EXP_DEVCTL2);  	return 0; - -err_clk_aux: -	clk_disable_unprepare(res->ahb_clk); -err_clk_ahb: -	clk_disable_unprepare(res->axi_s_clk); -err_clk_axi_s: -	clk_disable_unprepare(res->axi_m_clk); -err_clk_axi_m: -	clk_disable_unprepare(res->iface); -err_clk_iface: -	/* -	 * Not checking for failure, will anyway return -	 * the original failure in 'ret'. 
-	 */ -	for (i = 0; i < ARRAY_SIZE(res->rst); i++) -		reset_control_assert(res->rst[i]); - -	return ret;  }  static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) @@ -1184,22 +1210,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)  	if (ret < 0)  		return ret; -	if (pcie->cfg->pipe_clk_need_muxing) { -		res->pipe_clk_src = devm_clk_get(dev, "pipe_mux"); -		if (IS_ERR(res->pipe_clk_src)) -			return PTR_ERR(res->pipe_clk_src); - -		res->phy_pipe_clk = devm_clk_get(dev, "phy_pipe"); -		if (IS_ERR(res->phy_pipe_clk)) -			return PTR_ERR(res->phy_pipe_clk); - -		res->ref_clk_src = devm_clk_get(dev, "ref"); -		if (IS_ERR(res->ref_clk_src)) -			return PTR_ERR(res->ref_clk_src); -	} - -	res->pipe_clk = devm_clk_get(dev, "pipe"); -	return PTR_ERR_OR_ZERO(res->pipe_clk); +	return 0;  }  static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) @@ -1216,10 +1227,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)  		return ret;  	} -	/* Set TCXO as clock source for pcie_pipe_clk_src */ -	if (pcie->cfg->pipe_clk_need_muxing) -		clk_set_parent(res->pipe_clk_src, res->ref_clk_src); -  	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);  	if (ret < 0)  		goto err_disable_regulators; @@ -1261,6 +1268,11 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)  	val |= BIT(4);  	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +	/* Enable L1 and L1SS */ +	val = readl(pcie->parf + PCIE20_PARF_PM_CTRL); +	val &= ~REQ_NOT_ENTR_L1; +	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL); +  	if (IS_ENABLED(CONFIG_PCI_MSI)) {  		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);  		val |= BIT(31); @@ -1281,25 +1293,114 @@ static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;  	clk_bulk_disable_unprepare(res->num_clks, res->clks); +  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } -static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) +static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)  { -	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; +	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; +	struct dw_pcie *pci = pcie->pci; +	struct device *dev = pci->dev; +	int ret; -	/* Set pipe clock as clock source for pcie_pipe_clk_src */ -	if (pcie->cfg->pipe_clk_need_muxing) -		clk_set_parent(res->pipe_clk_src, res->phy_pipe_clk); +	res->clks[0].id = "iface"; +	res->clks[1].id = "axi_m"; +	res->clks[2].id = "axi_s"; +	res->clks[3].id = "axi_bridge"; +	res->clks[4].id = "rchng"; + +	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); +	if (ret < 0) +		return ret; + +	res->rst = devm_reset_control_array_get_exclusive(dev); +	if (IS_ERR(res->rst)) +		return PTR_ERR(res->rst); -	return clk_prepare_enable(res->pipe_clk); +	return 0;  } -static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie) +static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)  { -	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; +	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; + +	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); +} + +static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) +{ +	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; +	struct device *dev = pcie->pci->dev; +	int ret; + +	ret = reset_control_assert(res->rst); +	if (ret) { +		dev_err(dev, "reset assert failed (%d)\n", ret); +		return ret; +	} + +	/* +	 * Delay periods before and after reset deassert are working values +	 * from 
downstream Codeaurora kernel +	 */ +	usleep_range(2000, 2500); + +	ret = reset_control_deassert(res->rst); +	if (ret) { +		dev_err(dev, "reset deassert failed (%d)\n", ret); +		return ret; +	} + +	usleep_range(2000, 2500); -	clk_disable_unprepare(res->pipe_clk); +	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); +} + +static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) +{ +	struct dw_pcie *pci = pcie->pci; +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); +	u32 val; +	int i; + +	writel(SLV_ADDR_SPACE_SZ, +		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); + +	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); +	val &= ~BIT(0); +	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + +	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + +	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); +	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, +		pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); +	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | +		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, +		pci->dbi_base + GEN3_RELATED_OFF); + +	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | +		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | +		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, +		pcie->parf + PCIE20_PARF_SYS_CTRL); + +	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + +	dw_pcie_dbi_ro_wr_en(pci); +	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); + +	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); +	val &= ~PCI_EXP_LNKCAP_ASPMS; +	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); + +	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + +			PCI_EXP_DEVCTL2); + +	for (i = 0; i < 256; i++) +		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i)); + +	return 0;  }  static int qcom_pcie_link_up(struct dw_pcie *pci) @@ -1381,7 +1482,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)  	return 0;  } -static int qcom_pcie_host_init(struct pcie_port *pp) +static int qcom_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct qcom_pcie *pcie = to_qcom_pcie(pci); @@ -1433,6 +1534,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {  static const struct qcom_pcie_ops ops_2_1_0 = {  	.get_resources = qcom_pcie_get_resources_2_1_0,  	.init = qcom_pcie_init_2_1_0, +	.post_init = qcom_pcie_post_init_2_1_0,  	.deinit = qcom_pcie_deinit_2_1_0,  	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,  }; @@ -1441,6 +1543,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = {  static const struct qcom_pcie_ops ops_1_0_0 = {  	.get_resources = qcom_pcie_get_resources_1_0_0,  	.init = qcom_pcie_init_1_0_0, +	.post_init = qcom_pcie_post_init_1_0_0,  	.deinit = qcom_pcie_deinit_1_0_0,  	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,  }; @@ -1451,7 +1554,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = {  	.init = qcom_pcie_init_2_3_2,  	.post_init = qcom_pcie_post_init_2_3_2,  	.deinit = qcom_pcie_deinit_2_3_2, -	.post_deinit = qcom_pcie_post_deinit_2_3_2,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  }; @@ -1459,6 +1561,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {  static const struct qcom_pcie_ops ops_2_4_0 = {  	.get_resources = qcom_pcie_get_resources_2_4_0,  	.init = qcom_pcie_init_2_4_0, +	.post_init = qcom_pcie_post_init_2_4_0,  	.deinit = qcom_pcie_deinit_2_4_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  }; @@ -1467,6 +1570,7 @@ static const struct qcom_pcie_ops ops_2_4_0 = {  static const struct qcom_pcie_ops ops_2_3_3 = {  	.get_resources = 
qcom_pcie_get_resources_2_3_3,  	.init = qcom_pcie_init_2_3_3, +	.post_init = qcom_pcie_post_init_2_3_3,  	.deinit = qcom_pcie_deinit_2_3_3,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  }; @@ -1477,8 +1581,6 @@ static const struct qcom_pcie_ops ops_2_7_0 = {  	.init = qcom_pcie_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -	.post_init = qcom_pcie_post_init_2_7_0, -	.post_deinit = qcom_pcie_post_deinit_2_7_0,  };  /* Qcom IP rev.: 1.9.0 */ @@ -1487,11 +1589,18 @@ static const struct qcom_pcie_ops ops_1_9_0 = {  	.init = qcom_pcie_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, -	.post_init = qcom_pcie_post_init_2_7_0, -	.post_deinit = qcom_pcie_post_deinit_2_7_0,  	.config_sid = qcom_pcie_config_sid_sm8250,  }; +/* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */ +static const struct qcom_pcie_ops ops_2_9_0 = { +	.get_resources = qcom_pcie_get_resources_2_9_0, +	.init = qcom_pcie_init_2_9_0, +	.post_init = qcom_pcie_post_init_2_9_0, +	.deinit = qcom_pcie_deinit_2_9_0, +	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, +}; +  static const struct qcom_pcie_cfg apq8084_cfg = {  	.ops = &ops_1_0_0,  }; @@ -1533,7 +1642,6 @@ static const struct qcom_pcie_cfg sm8250_cfg = {  static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {  	.ops = &ops_1_9_0,  	.has_ddrss_sf_tbu_clk = true, -	.pipe_clk_need_muxing = true,  	.has_aggre0_clk = true,  	.has_aggre1_clk = true,  }; @@ -1541,14 +1649,12 @@ static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {  static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {  	.ops = &ops_1_9_0,  	.has_ddrss_sf_tbu_clk = true, -	.pipe_clk_need_muxing = true,  	.has_aggre1_clk = true,  };  static const struct qcom_pcie_cfg sc7280_cfg = {  	.ops = &ops_1_9_0,  	.has_tbu_clk = true, -	.pipe_clk_need_muxing = true,  };  static const struct qcom_pcie_cfg sc8180x_cfg = { @@ -1556,6 +1662,10 @@ static const struct qcom_pcie_cfg sc8180x_cfg = {  	.has_tbu_clk = true,  }; +static const struct qcom_pcie_cfg ipq6018_cfg = { +	.ops = &ops_2_9_0, +}; +  static const struct dw_pcie_ops dw_pcie_ops = {  	.link_up = qcom_pcie_link_up,  	.start_link = qcom_pcie_start_link, @@ -1564,7 +1674,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {  static int qcom_pcie_probe(struct platform_device *pdev)  {  	struct device *dev = &pdev->dev; -	struct pcie_port *pp; +	struct dw_pcie_rp *pp;  	struct dw_pcie *pci;  	struct qcom_pcie *pcie;  	const struct qcom_pcie_cfg *pcie_cfg; @@ -1666,6 +1776,7 @@ static const struct of_device_id qcom_pcie_match[] = {  	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },  	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },  	{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg }, +	{ .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },  	{ }  }; diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 1569e82b5568..99d47ae80331 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -85,7 +85,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)  	struct spear13xx_pcie *spear13xx_pcie = arg;  	struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;  	struct dw_pcie *pci = spear13xx_pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	unsigned int status;  	status = readl(&app_reg->int_sts); @@ -121,7 +121,7 @@ static int spear13xx_pcie_link_up(struct 
dw_pcie *pci)  	return 0;  } -static int spear13xx_pcie_host_init(struct pcie_port *pp) +static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); @@ -155,7 +155,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,  				   struct platform_device *pdev)  {  	struct dw_pcie *pci = spear13xx_pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	struct device *dev = &pdev->dev;  	int ret; @@ -172,7 +172,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,  	}  	pp->ops = &spear13xx_pcie_host_ops; -	pp->msi_irq = -ENODEV; +	pp->msi_irq[0] = -ENODEV;  	ret = dw_pcie_host_init(pp);  	if (ret) { @@ -258,7 +258,7 @@ static struct platform_driver spear13xx_pcie_driver = {  	.probe		= spear13xx_pcie_probe,  	.driver = {  		.name	= "spear-pcie", -		.of_match_table = of_match_ptr(spear13xx_pcie_of_match), +		.of_match_table = spear13xx_pcie_of_match,  		.suppress_bind_attrs = true,  	},  }; diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c index c2de6ed4d86f..55f61914a986 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c +++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c @@ -39,7 +39,8 @@ static int tegra194_acpi_init(struct pci_config_window *cfg)  static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,  			  u32 val, u32 reg)  { -	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); +	u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) + +		     PCIE_ATU_VIEWPORT_BASE;  	writel(val, pcie_ecam->iatu_base + offset + reg);  } @@ -58,8 +59,8 @@ static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,  		      PCIE_ATU_LIMIT);  	atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),  		      PCIE_ATU_UPPER_TARGET); -	atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1); -	atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +	atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1); +	atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);  }  static void __iomem *tegra194_map_bus(struct pci_bus *bus, diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index cc2678490162..1b6b437823d2 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1,8 +1,10 @@  // SPDX-License-Identifier: GPL-2.0+  /* - * PCIe host controller driver for Tegra194 SoC + * PCIe host controller driver for the following SoCs + * Tegra194 + * Tegra234   * - * Copyright (C) 2019 NVIDIA Corporation. + * Copyright (C) 2019-2022 NVIDIA Corporation.   
*   * Author: Vidya Sagar <vidyas@nvidia.com>   */ @@ -35,6 +37,9 @@  #include <soc/tegra/bpmp-abi.h>  #include "../../pci.h" +#define TEGRA194_DWC_IP_VER			0x490A +#define TEGRA234_DWC_IP_VER			0x562A +  #define APPL_PINMUX				0x0  #define APPL_PINMUX_PEX_RST			BIT(0)  #define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2) @@ -49,6 +54,7 @@  #define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)  #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22  #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1 +#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2  #define APPL_INTR_EN_L0_0			0x8  #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0) @@ -170,19 +176,6 @@  #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718  #define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19) -#define EVENT_COUNTER_ALL_CLEAR		0x3 -#define EVENT_COUNTER_ENABLE_ALL	0x7 -#define EVENT_COUNTER_ENABLE_SHIFT	2 -#define EVENT_COUNTER_EVENT_SEL_MASK	GENMASK(7, 0) -#define EVENT_COUNTER_EVENT_SEL_SHIFT	16 -#define EVENT_COUNTER_EVENT_Tx_L0S	0x2 -#define EVENT_COUNTER_EVENT_Rx_L0S	0x3 -#define EVENT_COUNTER_EVENT_L1		0x5 -#define EVENT_COUNTER_EVENT_L1_1	0x7 -#define EVENT_COUNTER_EVENT_L1_2	0x8 -#define EVENT_COUNTER_GROUP_SEL_SHIFT	24 -#define EVENT_COUNTER_GROUP_5		0x5 -  #define N_FTS_VAL					52  #define FTS_VAL						52 @@ -191,12 +184,6 @@  #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)  #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0) -#define GEN3_RELATED_OFF			0x890 -#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0) -#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16) -#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24 -#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24) -  #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0  #define AMBA_ERROR_RESPONSE_CRS_SHIFT		3  #define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0) @@ -243,7 +230,19 @@ static const unsigned int pcie_gen_freq[] = {  	GEN4_CORE_CLK_FREQ  }; -struct tegra194_pcie { +struct tegra_pcie_dw_of_data { +	u32 version; +	enum dw_pcie_device_mode mode; +	bool has_msix_doorbell_access_fix; +	bool has_sbr_reset_fix; +	bool has_l1ss_exit_fix; +	bool has_ltr_req_fix; +	u32 cdm_chk_int_en_bit; +	u32 gen4_preset_vec; +	u8 n_fts[2]; +}; + +struct tegra_pcie_dw {  	struct device *dev;  	struct resource *appl_res;  	struct resource *dbi_res; @@ -255,17 +254,20 @@ struct tegra194_pcie {  	struct dw_pcie pci;  	struct tegra_bpmp *bpmp; -	enum dw_pcie_device_mode mode; +	struct tegra_pcie_dw_of_data *of_data;  	bool supports_clkreq;  	bool enable_cdm_check; +	bool enable_srns;  	bool link_state;  	bool update_fc_fixup; +	bool enable_ext_refclk;  	u8 init_link_width;  	u32 msi_ctrl_int;  	u32 num_lanes;  	u32 cid;  	u32 cfg_link_cap_l1sub; +	u32 ras_des_cap;  	u32 pcie_cap_base;  	u32 aspm_cmrt;  	u32 aspm_pwr_on_t; @@ -287,22 +289,18 @@ struct tegra194_pcie {  	int ep_state;  }; -struct tegra194_pcie_of_data { -	enum dw_pcie_device_mode mode; -}; - -static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci) +static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)  { -	return container_of(pci, struct tegra194_pcie, pci); +	return container_of(pci, struct tegra_pcie_dw, pci);  } -static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value, +static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,  			       const u32 reg)  {  	writel_relaxed(value, pcie->appl_base + reg);  } -static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg) +static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)  {  	
return readl_relaxed(pcie->appl_base + reg);  } @@ -311,10 +309,10 @@ struct tegra_pcie_soc {  	enum dw_pcie_device_mode mode;  }; -static void apply_bad_link_workaround(struct pcie_port *pp) +static void apply_bad_link_workaround(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 current_link_width;  	u16 val; @@ -347,18 +345,18 @@ static void apply_bad_link_workaround(struct pcie_port *pp)  static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)  { -	struct tegra194_pcie *pcie = arg; +	struct tegra_pcie_dw *pcie = arg;  	struct dw_pcie *pci = &pcie->pci; -	struct pcie_port *pp = &pci->pp; -	u32 val, tmp; +	struct dw_pcie_rp *pp = &pci->pp; +	u32 val, status_l0, status_l1;  	u16 val_w; -	val = appl_readl(pcie, APPL_INTR_STATUS_L0); -	if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) { -		val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); -		if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) { -			appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0); - +	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0); +	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) { +		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0); +		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0); +		if (!pcie->of_data->has_sbr_reset_fix && +		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {  			/* SBR & Surprise Link Down WAR */  			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);  			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N; @@ -374,15 +372,21 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)  		}  	} -	if (val & APPL_INTR_STATUS_L0_INT_INT) { -		val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); -		if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) { +	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) { +		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0); +		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {  			appl_writel(pcie,  				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,  				    APPL_INTR_STATUS_L1_8_0);  			apply_bad_link_workaround(pp);  		} -		if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { +		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) { +			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + +						  PCI_EXP_LNKSTA); +			val_w |= PCI_EXP_LNKSTA_LBMS; +			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + +					   PCI_EXP_LNKSTA, val_w); +  			appl_writel(pcie,  				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,  				    APPL_INTR_STATUS_L1_8_0); @@ -394,31 +398,30 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)  		}  	} -	val = appl_readl(pcie, APPL_INTR_STATUS_L0); -	if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { -		val = appl_readl(pcie, APPL_INTR_STATUS_L1_18); -		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); -		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) { +	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) { +		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18); +		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS); +		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {  			dev_info(pci->dev, "CDM check complete\n"); -			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE; +			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;  		} -		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) { +		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {  			dev_err(pci->dev, "CDM comparison mismatch\n"); -			tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR; 
+			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;  		} -		if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) { +		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {  			dev_err(pci->dev, "CDM Logic error\n"); -			tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR; +			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;  		} -		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp); -		tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); -		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp); +		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val); +		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR); +		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);  	}  	return IRQ_HANDLED;  } -static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie) +static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)  {  	u32 val; @@ -446,7 +449,7 @@ static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)  static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)  { -	struct tegra194_pcie *pcie = arg; +	struct tegra_pcie_dw *pcie = arg;  	struct dw_pcie *pci = &pcie->pci;  	u32 val, speed; @@ -454,6 +457,9 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)  		PCI_EXP_LNKSTA_CLS;  	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); +	if (pcie->of_data->has_ltr_req_fix) +		return IRQ_HANDLED; +  	/* If EP doesn't advertise L1SS, just return */  	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);  	if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2))) @@ -492,7 +498,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)  static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)  { -	struct tegra194_pcie *pcie = arg; +	struct tegra_pcie_dw *pcie = arg;  	struct dw_pcie_ep *ep = &pcie->pci.ep;  	int spurious = 1;  	u32 status_l0, status_l1, link_status; @@ -535,16 +541,21 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)  	return IRQ_HANDLED;  } -static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,  				     int size, u32 *val)  { +	struct dw_pcie_rp *pp = bus->sysdata; +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); +  	/*  	 * This is an endpoint mode specific register happen to appear even  	 * when controller is operating in root port mode and system hangs  	 * when it is accessed with link being in ASPM-L1 state.  	 * So skip accessing it altogether  	 */ -	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) { +	if (!pcie->of_data->has_msix_doorbell_access_fix && +	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {  		*val = 0x00000000;  		return PCIBIOS_SUCCESSFUL;  	} @@ -552,16 +563,21 @@ static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,  	return pci_generic_config_read(bus, devfn, where, size, val);  } -static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, +static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,  				     int size, u32 val)  { +	struct dw_pcie_rp *pp = bus->sysdata; +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); +  	/*  	 * This is an endpoint mode specific register happen to appear even  	 * when controller is operating in root port mode and system hangs  	 * when it is accessed with link being in ASPM-L1 state.  	 
* So skip accessing it altogether  	 */ -	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) +	if (!pcie->of_data->has_msix_doorbell_access_fix && +	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)  		return PCIBIOS_SUCCESSFUL;  	return pci_generic_config_write(bus, devfn, where, size, val); @@ -569,30 +585,12 @@ static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,  static struct pci_ops tegra_pci_ops = {  	.map_bus = dw_pcie_own_conf_map_bus, -	.read = tegra194_pcie_rd_own_conf, -	.write = tegra194_pcie_wr_own_conf, +	.read = tegra_pcie_dw_rd_own_conf, +	.write = tegra_pcie_dw_wr_own_conf,  };  #if defined(CONFIG_PCIEASPM) -static const u32 event_cntr_ctrl_offset[] = { -	0x1d8, -	0x1a8, -	0x1a8, -	0x1a8, -	0x1c4, -	0x1d8 -}; - -static const u32 event_cntr_data_offset[] = { -	0x1dc, -	0x1ac, -	0x1ac, -	0x1ac, -	0x1c8, -	0x1dc -}; - -static void disable_aspm_l11(struct tegra194_pcie *pcie) +static void disable_aspm_l11(struct tegra_pcie_dw *pcie)  {  	u32 val; @@ -601,7 +599,7 @@ static void disable_aspm_l11(struct tegra194_pcie *pcie)  	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);  } -static void disable_aspm_l12(struct tegra194_pcie *pcie) +static void disable_aspm_l12(struct tegra_pcie_dw *pcie)  {  	u32 val; @@ -610,24 +608,27 @@ static void disable_aspm_l12(struct tegra194_pcie *pcie)  	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);  } -static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event) +static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)  {  	u32 val; -	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]); +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + +				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);  	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);  	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;  	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;  	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT; -	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); -	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]); +	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + +			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val); +	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap + +				PCIE_RAS_DES_EVENT_COUNTER_DATA);  	return val;  }  static int aspm_state_cnt(struct seq_file *s, void *data)  { -	struct tegra194_pcie *pcie = (struct tegra194_pcie *) +	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)  				     dev_get_drvdata(s->private);  	u32 val; @@ -647,18 +648,20 @@ static int aspm_state_cnt(struct seq_file *s, void *data)  		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));  	/* Clear all counters */ -	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], +	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + +			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,  			   EVENT_COUNTER_ALL_CLEAR);  	/* Re-enable counting */  	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;  	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; -	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val); +	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap + +			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);  	return 0;  } -static void init_host_aspm(struct tegra194_pcie *pcie) +static void init_host_aspm(struct tegra_pcie_dw *pcie)  {  	struct dw_pcie *pci = &pcie->pci;  	u32 val; @@ -666,10 +669,14 @@ static void init_host_aspm(struct 
tegra194_pcie *pcie)  	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);  	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP; +	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci, +							PCI_EXT_CAP_ID_VNDR); +  	/* Enable ASPM counters */  	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;  	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT; -	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val); +	dw_pcie_writel_dbi(pci, pcie->ras_des_cap + +			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);  	/* Program T_cmrt and T_pwr_on values */  	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); @@ -686,22 +693,22 @@ static void init_host_aspm(struct tegra194_pcie *pcie)  	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);  } -static void init_debugfs(struct tegra194_pcie *pcie) +static void init_debugfs(struct tegra_pcie_dw *pcie)  {  	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,  				    aspm_state_cnt);  }  #else -static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; } -static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; } -static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; } -static inline void init_debugfs(struct tegra194_pcie *pcie) { return; } +static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; } +static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; } +static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } +static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }  #endif -static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val;  	u16 val_w; @@ -709,13 +716,15 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)  	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;  	appl_writel(pcie, val, APPL_INTR_EN_L0_0); -	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); -	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; -	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); +	if (!pcie->of_data->has_sbr_reset_fix) { +		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); +		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN; +		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); +	}  	if (pcie->enable_cdm_check) {  		val = appl_readl(pcie, APPL_INTR_EN_L0_0); -		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN; +		val |= pcie->of_data->cdm_chk_int_en_bit;  		appl_writel(pcie, val, APPL_INTR_EN_L0_0);  		val = appl_readl(pcie, APPL_INTR_EN_L1_18); @@ -736,10 +745,10 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)  			   val_w);  } -static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val;  	/* Enable legacy interrupt generation */ @@ -757,10 +766,10 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)  	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);  } -static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = 
to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val;  	/* Enable MSI interrupt generation */ @@ -770,10 +779,10 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)  	appl_writel(pcie, val, APPL_INTR_EN_L0_0);  } -static void tegra_pcie_enable_interrupts(struct pcie_port *pp) +static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	/* Clear interrupt statuses before enabling interrupts */  	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); @@ -798,7 +807,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)  		tegra_pcie_enable_msi_interrupts(pp);  } -static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie) +static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)  {  	struct dw_pcie *pci = &pcie->pci;  	u32 val, offset, i; @@ -842,7 +851,8 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)  	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);  	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK; -	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT); +	val |= (pcie->of_data->gen4_preset_vec << +		GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);  	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;  	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val); @@ -851,11 +861,12 @@ static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)  	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);  } -static int tegra194_pcie_host_init(struct pcie_port *pp) +static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val; +	u16 val_16;  	pp->bridge->ops = &tegra_pci_ops; @@ -863,6 +874,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)  		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,  							      PCI_CAP_ID_EXP); +	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); +	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; +	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; +	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); +  	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);  	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);  	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val); @@ -887,6 +903,15 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)  	val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);  	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val); +	/* Clear Slot Clock Configuration bit if SRNS configuration */ +	if (pcie->enable_srns) { +		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + +					   PCI_EXP_LNKSTA); +		val_16 &= ~PCI_EXP_LNKSTA_SLC; +		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, +				   val_16); +	} +  	config_gen3_gen4_eq_presets(pcie);  	init_host_aspm(pcie); @@ -897,9 +922,11 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)  		disable_aspm_l12(pcie);  	} -	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); -	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; -	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +	if (!pcie->of_data->has_l1ss_exit_fix) { +		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); +		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; +		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +	}  	if (pcie->update_fc_fixup) {  		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); @@ 
-912,14 +939,14 @@ static int tegra194_pcie_host_init(struct pcie_port *pp)  	return 0;  } -static int tegra194_pcie_start_link(struct dw_pcie *pci) +static int tegra_pcie_dw_start_link(struct dw_pcie *pci)  {  	u32 val, offset, speed, tmp; -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); -	struct pcie_port *pp = &pci->pp; +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); +	struct dw_pcie_rp *pp = &pci->pp;  	bool retry = true; -	if (pcie->mode == DW_PCIE_EP_TYPE) { +	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {  		enable_irq(pcie->pex_rst_irq);  		return 0;  	} @@ -978,9 +1005,9 @@ retry_link:  		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);  		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);  		val &= ~PCI_DLF_EXCHANGE_ENABLE; -		dw_pcie_writel_dbi(pci, offset, val); +		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val); -		tegra194_pcie_host_init(pp); +		tegra_pcie_dw_host_init(pp);  		dw_pcie_setup_rc(pp);  		retry = false; @@ -996,32 +1023,32 @@ retry_link:  	return 0;  } -static int tegra194_pcie_link_up(struct dw_pcie *pci) +static int tegra_pcie_dw_link_up(struct dw_pcie *pci)  { -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);  	return !!(val & PCI_EXP_LNKSTA_DLLLA);  } -static void tegra194_pcie_stop_link(struct dw_pcie *pci) +static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)  { -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	disable_irq(pcie->pex_rst_irq);  }  static const struct dw_pcie_ops tegra_dw_pcie_ops = { -	.link_up = tegra194_pcie_link_up, -	.start_link = tegra194_pcie_start_link, -	.stop_link = tegra194_pcie_stop_link, +	.link_up = tegra_pcie_dw_link_up, +	.start_link = tegra_pcie_dw_start_link, +	.stop_link = tegra_pcie_dw_stop_link,  }; -static const struct dw_pcie_host_ops tegra194_pcie_host_ops = { -	.host_init = tegra194_pcie_host_init, +static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { +	.host_init = tegra_pcie_dw_host_init,  }; -static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie) +static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)  {  	unsigned int phy_count = pcie->phy_count; @@ -1031,7 +1058,7 @@ static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)  	}  } -static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie) +static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)  {  	unsigned int i;  	int ret; @@ -1058,7 +1085,7 @@ phy_exit:  	return ret;  } -static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie) +static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)  {  	struct platform_device *pdev = to_platform_device(pcie->dev);  	struct device_node *np = pcie->dev->of_node; @@ -1111,13 +1138,27 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)  	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))  		pcie->update_fc_fixup = true; +	/* RP using an external REFCLK is supported only in Tegra234 */ +	if (pcie->of_data->version == TEGRA194_DWC_IP_VER) { +		if (pcie->of_data->mode == DW_PCIE_EP_TYPE) +			pcie->enable_ext_refclk = true; +	} else { +		pcie->enable_ext_refclk = +			of_property_read_bool(pcie->dev->of_node, +					      "nvidia,enable-ext-refclk"); +	} +  	pcie->supports_clkreq =  		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");  	pcie->enable_cdm_check =  		of_property_read_bool(np, "snps,enable-cdm-check"); -	if (pcie->mode == DW_PCIE_RC_TYPE) +	
if (pcie->of_data->version == TEGRA234_DWC_IP_VER) +		pcie->enable_srns = +			of_property_read_bool(np, "nvidia,enable-srns"); + +	if (pcie->of_data->mode == DW_PCIE_RC_TYPE)  		return 0;  	/* Endpoint mode specific DT entries */ @@ -1154,15 +1195,18 @@ static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)  	return 0;  } -static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie, +static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,  					  bool enable)  {  	struct mrq_uphy_response resp;  	struct tegra_bpmp_message msg;  	struct mrq_uphy_request req; -	/* Controller-5 doesn't need to have its state set by BPMP-FW */ -	if (pcie->cid == 5) +	/* +	 * Controller-5 doesn't need to have its state set by BPMP-FW in +	 * Tegra194 +	 */ +	if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)  		return 0;  	memset(&req, 0, sizeof(req)); @@ -1182,7 +1226,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,  	return tegra_bpmp_transfer(pcie->bpmp, &msg);  } -static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie, +static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,  					 bool enable)  {  	struct mrq_uphy_response resp; @@ -1210,9 +1254,9 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,  	return tegra_bpmp_transfer(pcie->bpmp, &msg);  } -static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie) +static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)  { -	struct pcie_port *pp = &pcie->pci.pp; +	struct dw_pcie_rp *pp = &pcie->pci.pp;  	struct pci_bus *child, *root_bus = NULL;  	struct pci_dev *pdev; @@ -1248,7 +1292,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)  	}  } -static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie) +static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)  {  	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");  	if (IS_ERR(pcie->slot_ctl_3v3)) { @@ -1269,7 +1313,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)  	return 0;  } -static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie) +static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)  {  	int ret; @@ -1307,7 +1351,7 @@ fail_12v_enable:  	return ret;  } -static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie) +static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)  {  	if (pcie->slot_ctl_12v)  		regulator_disable(pcie->slot_ctl_12v); @@ -1315,7 +1359,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)  		regulator_disable(pcie->slot_ctl_3v3);  } -static int tegra_pcie_config_controller(struct tegra194_pcie *pcie, +static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,  					bool en_hw_hot_rst)  {  	int ret; @@ -1328,6 +1372,14 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,  		return ret;  	} +	if (pcie->enable_ext_refclk) { +		ret = tegra_pcie_bpmp_set_pll_state(pcie, true); +		if (ret) { +			dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret); +			goto fail_pll_init; +		} +	} +  	ret = tegra_pcie_enable_slot_regulators(pcie);  	if (ret < 0)  		goto fail_slot_reg_en; @@ -1351,11 +1403,13 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,  		goto fail_core_apb_rst;  	} -	if (en_hw_hot_rst) { +	if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {  		/* Enable HW_HOT_RST mode */  		val = appl_readl(pcie, 
APPL_CTRL);  		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<  			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); +		val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << +			APPL_CTRL_HW_HOT_RST_MODE_SHIFT);  		val |= APPL_CTRL_HW_HOT_RST_EN;  		appl_writel(pcie, val, APPL_CTRL);  	} @@ -1382,6 +1436,19 @@ static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,  	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);  	appl_writel(pcie, val, APPL_CFG_MISC); +	if (pcie->enable_srns || pcie->enable_ext_refclk) { +		/* +		 * When Tegra PCIe RP is using external clock, it cannot supply +		 * same clock to its downstream hierarchy. Hence, gate PCIe RP +		 * REFCLK out pads when RP & EP are using separate clocks or RP +		 * is using an external REFCLK. +		 */ +		val = appl_readl(pcie, APPL_PINMUX); +		val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; +		val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; +		appl_writel(pcie, val, APPL_PINMUX); +	} +  	if (!pcie->supports_clkreq) {  		val = appl_readl(pcie, APPL_PINMUX);  		val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; @@ -1407,12 +1474,15 @@ fail_core_clk:  fail_reg_en:  	tegra_pcie_disable_slot_regulators(pcie);  fail_slot_reg_en: +	if (pcie->enable_ext_refclk) +		tegra_pcie_bpmp_set_pll_state(pcie, false); +fail_pll_init:  	tegra_pcie_bpmp_set_ctrl_state(pcie, false);  	return ret;  } -static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie) +static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)  {  	int ret; @@ -1434,23 +1504,29 @@ static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)  	tegra_pcie_disable_slot_regulators(pcie); +	if (pcie->enable_ext_refclk) { +		ret = tegra_pcie_bpmp_set_pll_state(pcie, false); +		if (ret) +			dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret); +	} +  	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);  	if (ret)  		dev_err(pcie->dev, "Failed to disable controller %d: %d\n",  			pcie->cid, ret);  } -static int tegra_pcie_init_controller(struct tegra194_pcie *pcie) +static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)  {  	struct dw_pcie *pci = &pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	int ret;  	ret = tegra_pcie_config_controller(pcie, false);  	if (ret < 0)  		return ret; -	pp->ops = &tegra194_pcie_host_ops; +	pp->ops = &tegra_pcie_dw_host_ops;  	ret = dw_pcie_host_init(pp);  	if (ret < 0) { @@ -1465,11 +1541,11 @@ fail_host_init:  	return ret;  } -static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie) +static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)  {  	u32 val; -	if (!tegra194_pcie_link_up(&pcie->pci)) +	if (!tegra_pcie_dw_link_up(&pcie->pci))  		return 0;  	val = appl_readl(pcie, APPL_RADM_STATUS); @@ -1481,12 +1557,12 @@ static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)  				 1, PME_ACK_TIMEOUT);  } -static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie) +static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)  {  	u32 data;  	int err; -	if (!tegra194_pcie_link_up(&pcie->pci)) { +	if (!tegra_pcie_dw_link_up(&pcie->pci)) {  		dev_dbg(pcie->dev, "PCIe link is not up...!\n");  		return;  	} @@ -1543,15 +1619,15 @@ static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)  	appl_writel(pcie, data, APPL_PINMUX);  } -static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie) +static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)  {  	tegra_pcie_downstream_dev_to_D0(pcie);  	dw_pcie_host_deinit(&pcie->pci.pp); -	
tegra194_pcie_pme_turnoff(pcie); +	tegra_pcie_dw_pme_turnoff(pcie);  	tegra_pcie_unconfig_controller(pcie);  } -static int tegra_pcie_config_rp(struct tegra194_pcie *pcie) +static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)  {  	struct device *dev = pcie->dev;  	char *name; @@ -1578,7 +1654,7 @@ static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)  		goto fail_pm_get_sync;  	} -	pcie->link_state = tegra194_pcie_link_up(&pcie->pci); +	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);  	if (!pcie->link_state) {  		ret = -ENOMEDIUM;  		goto fail_host_init; @@ -1603,7 +1679,7 @@ fail_pm_get_sync:  	return ret;  } -static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie) +static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)  {  	u32 val;  	int ret; @@ -1634,6 +1710,13 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)  	pm_runtime_put_sync(pcie->dev); +	if (pcie->enable_ext_refclk) { +		ret = tegra_pcie_bpmp_set_pll_state(pcie, false); +		if (ret) +			dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", +				ret); +	} +  	ret = tegra_pcie_bpmp_set_pll_state(pcie, false);  	if (ret)  		dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret); @@ -1642,13 +1725,14 @@ static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)  	dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");  } -static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie) +static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)  {  	struct dw_pcie *pci = &pcie->pci;  	struct dw_pcie_ep *ep = &pci->ep;  	struct device *dev = pcie->dev;  	u32 val;  	int ret; +	u16 val_16;  	if (pcie->ep_state == EP_STATE_ENABLED)  		return; @@ -1660,10 +1744,20 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)  		return;  	} -	ret = tegra_pcie_bpmp_set_pll_state(pcie, true); +	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);  	if (ret) { -		dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret); -		goto fail_pll_init; +		dev_err(pcie->dev, "Failed to enable controller %u: %d\n", +			pcie->cid, ret); +		goto fail_set_ctrl_state; +	} + +	if (pcie->enable_ext_refclk) { +		ret = tegra_pcie_bpmp_set_pll_state(pcie, true); +		if (ret) { +			dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", +				ret); +			goto fail_pll_init; +		}  	}  	ret = clk_prepare_enable(pcie->core_clk); @@ -1760,12 +1854,29 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)  		disable_aspm_l12(pcie);  	} -	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); -	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; -	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +	if (!pcie->of_data->has_l1ss_exit_fix) { +		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); +		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; +		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); +	}  	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,  						      PCI_CAP_ID_EXP); + +	val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL); +	val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD; +	val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B; +	dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16); + +	/* Clear Slot Clock Configuration bit if SRNS configuration */ +	if (pcie->enable_srns) { +		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + +					   PCI_EXP_LNKSTA); +		val_16 &= ~PCI_EXP_LNKSTA_SLC; +		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, +				   val_16); +	} +  	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);  	val = 
(ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); @@ -1782,6 +1893,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)  	dw_pcie_ep_init_notify(ep); +	/* Program the private control to allow sending LTR upstream */ +	if (pcie->of_data->has_ltr_req_fix) { +		val = appl_readl(pcie, APPL_LTR_MSG_2); +		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; +		appl_writel(pcie, val, APPL_LTR_MSG_2); +	} +  	/* Enable LTSSM */  	val = appl_readl(pcie, APPL_CTRL);  	val |= APPL_CTRL_LTSSM_EN; @@ -1802,12 +1920,14 @@ fail_core_apb_rst:  fail_core_clk_enable:  	tegra_pcie_bpmp_set_pll_state(pcie, false);  fail_pll_init: +	tegra_pcie_bpmp_set_ctrl_state(pcie, false); +fail_set_ctrl_state:  	pm_runtime_put_sync(dev);  }  static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)  { -	struct tegra194_pcie *pcie = arg; +	struct tegra_pcie_dw *pcie = arg;  	if (gpiod_get_value(pcie->pex_rst_gpiod))  		pex_ep_event_pex_rst_assert(pcie); @@ -1817,7 +1937,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)  	return IRQ_HANDLED;  } -static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)  {  	/* Tegra194 supports only INTA */  	if (irq > 1) @@ -1829,7 +1949,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)  	return 0;  } -static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)  {  	if (unlikely(irq > 31))  		return -EINVAL; @@ -1839,7 +1959,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)  	return 0;  } -static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq) +static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)  {  	struct dw_pcie_ep *ep = &pcie->pci.ep; @@ -1853,7 +1973,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,  				   u16 interrupt_num)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	struct tegra194_pcie *pcie = to_tegra_pcie(pci); +	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	switch (type) {  	case PCI_EPC_IRQ_LEGACY: @@ -1894,7 +2014,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {  	.get_features = tegra_pcie_ep_get_features,  }; -static int tegra_pcie_config_ep(struct tegra194_pcie *pcie, +static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,  				struct platform_device *pdev)  {  	struct dw_pcie *pci = &pcie->pci; @@ -1949,19 +2069,20 @@ static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,  	if (ret) {  		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",  			ret); +		pm_runtime_disable(dev);  		return ret;  	}  	return 0;  } -static int tegra194_pcie_probe(struct platform_device *pdev) +static int tegra_pcie_dw_probe(struct platform_device *pdev)  { -	const struct tegra194_pcie_of_data *data; +	const struct tegra_pcie_dw_of_data *data;  	struct device *dev = &pdev->dev;  	struct resource *atu_dma_res; -	struct tegra194_pcie *pcie; -	struct pcie_port *pp; +	struct tegra_pcie_dw *pcie; +	struct dw_pcie_rp *pp;  	struct dw_pcie *pci;  	struct phy **phys;  	char *name; @@ -1977,16 +2098,14 @@ static int tegra194_pcie_probe(struct platform_device *pdev)  	pci = &pcie->pci;  	pci->dev = &pdev->dev;  	pci->ops = &tegra_dw_pcie_ops; -	pci->n_fts[0] = N_FTS_VAL; -	pci->n_fts[1] = FTS_VAL; -	pci->version = 0x490A; - +	pcie->dev = &pdev->dev; +	pcie->of_data = (struct tegra_pcie_dw_of_data 
*)data; +	pci->n_fts[0] = pcie->of_data->n_fts[0]; +	pci->n_fts[1] = pcie->of_data->n_fts[1];  	pp = &pci->pp;  	pp->num_vectors = MAX_MSI_IRQS; -	pcie->dev = &pdev->dev; -	pcie->mode = (enum dw_pcie_device_mode)data->mode; -	ret = tegra194_pcie_parse_dt(pcie); +	ret = tegra_pcie_dw_parse_dt(pcie);  	if (ret < 0) {  		const char *level = KERN_ERR; @@ -2101,7 +2220,7 @@ static int tegra194_pcie_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, pcie); -	switch (pcie->mode) { +	switch (pcie->of_data->mode) {  	case DW_PCIE_RC_TYPE:  		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,  				       IRQF_SHARED, "tegra-pcie-intr", pcie); @@ -2136,7 +2255,8 @@ static int tegra194_pcie_probe(struct platform_device *pdev)  		break;  	default: -		dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode); +		dev_err(dev, "Invalid PCIe device type %d\n", +			pcie->of_data->mode);  	}  fail: @@ -2144,16 +2264,22 @@ fail:  	return ret;  } -static int tegra194_pcie_remove(struct platform_device *pdev) +static int tegra_pcie_dw_remove(struct platform_device *pdev)  { -	struct tegra194_pcie *pcie = platform_get_drvdata(pdev); +	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); -	if (!pcie->link_state) -		return 0; +	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { +		if (!pcie->link_state) +			return 0; + +		debugfs_remove_recursive(pcie->debugfs); +		tegra_pcie_deinit_controller(pcie); +		pm_runtime_put_sync(pcie->dev); +	} else { +		disable_irq(pcie->pex_rst_irq); +		pex_ep_event_pex_rst_assert(pcie); +	} -	debugfs_remove_recursive(pcie->debugfs); -	tegra_pcie_deinit_controller(pcie); -	pm_runtime_put_sync(pcie->dev);  	pm_runtime_disable(pcie->dev);  	tegra_bpmp_put(pcie->bpmp);  	if (pcie->pex_refclk_sel_gpiod) @@ -2162,41 +2288,48 @@ static int tegra194_pcie_remove(struct platform_device *pdev)  	return 0;  } -static int tegra194_pcie_suspend_late(struct device *dev) +static int tegra_pcie_dw_suspend_late(struct device *dev)  { -	struct tegra194_pcie *pcie = dev_get_drvdata(dev); +	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);  	u32 val; +	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) { +		dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n"); +		return -EPERM; +	} +  	if (!pcie->link_state)  		return 0;  	/* Enable HW_HOT_RST mode */ -	val = appl_readl(pcie, APPL_CTRL); -	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << -		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); -	val |= APPL_CTRL_HW_HOT_RST_EN; -	appl_writel(pcie, val, APPL_CTRL); +	if (!pcie->of_data->has_sbr_reset_fix) { +		val = appl_readl(pcie, APPL_CTRL); +		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << +			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); +		val |= APPL_CTRL_HW_HOT_RST_EN; +		appl_writel(pcie, val, APPL_CTRL); +	}  	return 0;  } -static int tegra194_pcie_suspend_noirq(struct device *dev) +static int tegra_pcie_dw_suspend_noirq(struct device *dev)  { -	struct tegra194_pcie *pcie = dev_get_drvdata(dev); +	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);  	if (!pcie->link_state)  		return 0;  	tegra_pcie_downstream_dev_to_D0(pcie); -	tegra194_pcie_pme_turnoff(pcie); +	tegra_pcie_dw_pme_turnoff(pcie);  	tegra_pcie_unconfig_controller(pcie);  	return 0;  } -static int tegra194_pcie_resume_noirq(struct device *dev) +static int tegra_pcie_dw_resume_noirq(struct device *dev)  { -	struct tegra194_pcie *pcie = dev_get_drvdata(dev); +	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);  	int ret;  	if (!pcie->link_state) @@ -2206,7 +2339,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)  	if (ret < 0)  		
return ret; -	ret = tegra194_pcie_host_init(&pcie->pci.pp); +	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);  	if (ret < 0) {  		dev_err(dev, "Failed to init host: %d\n", ret);  		goto fail_host_init; @@ -2214,7 +2347,7 @@ static int tegra194_pcie_resume_noirq(struct device *dev)  	dw_pcie_setup_rc(&pcie->pci.pp); -	ret = tegra194_pcie_start_link(&pcie->pci); +	ret = tegra_pcie_dw_start_link(&pcie->pci);  	if (ret < 0)  		goto fail_host_init; @@ -2225,12 +2358,12 @@ fail_host_init:  	return ret;  } -static int tegra194_pcie_resume_early(struct device *dev) +static int tegra_pcie_dw_resume_early(struct device *dev)  { -	struct tegra194_pcie *pcie = dev_get_drvdata(dev); +	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);  	u32 val; -	if (pcie->mode == DW_PCIE_EP_TYPE) { +	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {  		dev_err(dev, "Suspend is not supported in EP mode");  		return -ENOTSUPP;  	} @@ -2239,75 +2372,124 @@ static int tegra194_pcie_resume_early(struct device *dev)  		return 0;  	/* Disable HW_HOT_RST mode */ -	val = appl_readl(pcie, APPL_CTRL); -	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << -		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); -	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << -	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT; -	val &= ~APPL_CTRL_HW_HOT_RST_EN; -	appl_writel(pcie, val, APPL_CTRL); +	if (!pcie->of_data->has_sbr_reset_fix) { +		val = appl_readl(pcie, APPL_CTRL); +		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << +			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); +		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST << +		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT; +		val &= ~APPL_CTRL_HW_HOT_RST_EN; +		appl_writel(pcie, val, APPL_CTRL); +	}  	return 0;  } -static void tegra194_pcie_shutdown(struct platform_device *pdev) +static void tegra_pcie_dw_shutdown(struct platform_device *pdev)  { -	struct tegra194_pcie *pcie = platform_get_drvdata(pdev); +	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev); -	if (!pcie->link_state) -		return; +	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) { +		if (!pcie->link_state) +			return; -	debugfs_remove_recursive(pcie->debugfs); -	tegra_pcie_downstream_dev_to_D0(pcie); +		debugfs_remove_recursive(pcie->debugfs); +		tegra_pcie_downstream_dev_to_D0(pcie); -	disable_irq(pcie->pci.pp.irq); -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		disable_irq(pcie->pci.pp.msi_irq); +		disable_irq(pcie->pci.pp.irq); +		if (IS_ENABLED(CONFIG_PCI_MSI)) +			disable_irq(pcie->pci.pp.msi_irq[0]); -	tegra194_pcie_pme_turnoff(pcie); -	tegra_pcie_unconfig_controller(pcie); +		tegra_pcie_dw_pme_turnoff(pcie); +		tegra_pcie_unconfig_controller(pcie); +		pm_runtime_put_sync(pcie->dev); +	} else { +		disable_irq(pcie->pex_rst_irq); +		pex_ep_event_pex_rst_assert(pcie); +	}  } -static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = { +static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = { +	.version = TEGRA194_DWC_IP_VER, +	.mode = DW_PCIE_RC_TYPE, +	.cdm_chk_int_en_bit = BIT(19), +	/* Gen4 - 5, 6, 8 and 9 presets enabled */ +	.gen4_preset_vec = 0x360, +	.n_fts = { 52, 52 }, +}; + +static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = { +	.version = TEGRA194_DWC_IP_VER, +	.mode = DW_PCIE_EP_TYPE, +	.cdm_chk_int_en_bit = BIT(19), +	/* Gen4 - 5, 6, 8 and 9 presets enabled */ +	.gen4_preset_vec = 0x360, +	.n_fts = { 52, 52 }, +}; + +static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = { +	.version = TEGRA234_DWC_IP_VER,  	.mode = DW_PCIE_RC_TYPE, +	.has_msix_doorbell_access_fix = true, +	.has_sbr_reset_fix = true, +	.has_l1ss_exit_fix = true, 
+	.cdm_chk_int_en_bit = BIT(18), +	/* Gen4 - 6, 8 and 9 presets enabled */ +	.gen4_preset_vec = 0x340, +	.n_fts = { 52, 80 },  }; -static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = { +static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = { +	.version = TEGRA234_DWC_IP_VER,  	.mode = DW_PCIE_EP_TYPE, +	.has_l1ss_exit_fix = true, +	.has_ltr_req_fix = true, +	.cdm_chk_int_en_bit = BIT(18), +	/* Gen4 - 6, 8 and 9 presets enabled */ +	.gen4_preset_vec = 0x340, +	.n_fts = { 52, 80 },  }; -static const struct of_device_id tegra194_pcie_of_match[] = { +static const struct of_device_id tegra_pcie_dw_of_match[] = {  	{  		.compatible = "nvidia,tegra194-pcie", -		.data = &tegra194_pcie_rc_of_data, +		.data = &tegra194_pcie_dw_rc_of_data,  	},  	{  		.compatible = "nvidia,tegra194-pcie-ep", -		.data = &tegra194_pcie_ep_of_data, +		.data = &tegra194_pcie_dw_ep_of_data, +	}, +	{ +		.compatible = "nvidia,tegra234-pcie", +		.data = &tegra234_pcie_dw_rc_of_data, +	}, +	{ +		.compatible = "nvidia,tegra234-pcie-ep", +		.data = &tegra234_pcie_dw_ep_of_data,  	}, -	{}, +	{}  }; -static const struct dev_pm_ops tegra194_pcie_pm_ops = { -	.suspend_late = tegra194_pcie_suspend_late, -	.suspend_noirq = tegra194_pcie_suspend_noirq, -	.resume_noirq = tegra194_pcie_resume_noirq, -	.resume_early = tegra194_pcie_resume_early, +static const struct dev_pm_ops tegra_pcie_dw_pm_ops = { +	.suspend_late = tegra_pcie_dw_suspend_late, +	.suspend_noirq = tegra_pcie_dw_suspend_noirq, +	.resume_noirq = tegra_pcie_dw_resume_noirq, +	.resume_early = tegra_pcie_dw_resume_early,  }; -static struct platform_driver tegra194_pcie_driver = { -	.probe = tegra194_pcie_probe, -	.remove = tegra194_pcie_remove, -	.shutdown = tegra194_pcie_shutdown, +static struct platform_driver tegra_pcie_dw_driver = { +	.probe = tegra_pcie_dw_probe, +	.remove = tegra_pcie_dw_remove, +	.shutdown = tegra_pcie_dw_shutdown,  	.driver = {  		.name	= "tegra194-pcie", -		.pm = &tegra194_pcie_pm_ops, -		.of_match_table = tegra194_pcie_of_match, +		.pm = &tegra_pcie_dw_pm_ops, +		.of_match_table = tegra_pcie_dw_of_match,  	},  }; -module_platform_driver(tegra194_pcie_driver); +module_platform_driver(tegra_pcie_dw_driver); -MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match); +MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);  MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");  MODULE_DESCRIPTION("NVIDIA PCIe host controller driver"); diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index b45ac3754242..48c3eba817b4 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -171,7 +171,7 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)  static void uniphier_pcie_irq_mask(struct irq_data *d)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);  	unsigned long flags; @@ -188,7 +188,7 @@ static void uniphier_pcie_irq_mask(struct irq_data *d)  static void uniphier_pcie_irq_unmask(struct irq_data *d)  { -	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);  	unsigned long flags; @@ -225,7 +225,7 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {  static void uniphier_pcie_irq_handler(struct 
irq_desc *desc)  { -	struct pcie_port *pp = irq_desc_get_handler_data(desc); +	struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct uniphier_pcie *pcie = to_uniphier_pcie(pci);  	struct irq_chip *chip = irq_desc_get_chip(desc); @@ -258,7 +258,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)  	chained_irq_exit(chip, desc);  } -static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) +static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct uniphier_pcie *pcie = to_uniphier_pcie(pci); @@ -295,7 +295,7 @@ out_put_node:  	return ret;  } -static int uniphier_pcie_host_init(struct pcie_port *pp) +static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct uniphier_pcie *pcie = to_uniphier_pcie(pci); diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 50f80f07e4db..71026fefa366 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -178,7 +178,7 @@ static void visconti_pcie_stop_link(struct dw_pcie *pci)   */  static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)  { -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	return cpu_addr & ~pp->io_base;  } @@ -190,7 +190,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {  	.stop_link = visconti_pcie_stop_link,  }; -static int visconti_pcie_host_init(struct pcie_port *pp) +static int visconti_pcie_host_init(struct dw_pcie_rp *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); @@ -278,7 +278,7 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,  				  struct platform_device *pdev)  {  	struct dw_pcie *pci = &pcie->pci; -	struct pcie_port *pp = &pci->pp; +	struct dw_pcie_rp *pp = &pci->pp;  	pp->irq = platform_get_irq_byname(pdev, "intr");  	if (pp->irq < 0) diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index ffec82c8a523..966c8b48bd96 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -8,6 +8,7 @@   * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>   */ +#include <linux/bitfield.h>  #include <linux/delay.h>  #include <linux/gpio/consumer.h>  #include <linux/interrupt.h> @@ -33,6 +34,7 @@  #define PCIE_CORE_CMD_STATUS_REG				0x4  #define PCIE_CORE_DEV_REV_REG					0x8  #define PCIE_CORE_PCIEXP_CAP					0xc0 +#define PCIE_CORE_PCIERR_CAP					0x100  #define PCIE_CORE_ERR_CAPCTL_REG				0x118  #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX			BIT(5)  #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN			BIT(6) @@ -857,14 +859,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,  	switch (reg) { -	case PCI_EXP_SLTCTL: -		*value = PCI_EXP_SLTSTA_PDS << 16; -		return PCI_BRIDGE_EMUL_HANDLED; -  	/* -	 * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need -	 * to be handled here, because their values are stored in emulated -	 * config space buffer, and we read them from there when needed. +	 * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are +	 * also supported, but do not need to be handled here, because their +	 * values are stored in emulated config space buffer, and we read them +	 * from there when needed.  	 
*/  	case PCI_EXP_LNKCAP: { @@ -944,11 +943,89 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,  	}  } +static pci_bridge_emul_read_status_t +advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge, +				   int reg, u32 *value) +{ +	struct advk_pcie *pcie = bridge->data; + +	switch (reg) { +	case 0: +		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); + +		/* +		 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada +		 * 3700 Functional Specification does not document registers +		 * at those addresses. +		 * +		 * Thus we clear PCI_EXT_CAP_NEXT bits to make Advanced Error +		 * Reporting Capability header the last Extended Capability. +		 * If we obtain documentation for those registers in the +		 * future, this can be changed. +		 */ +		*value &= 0x000fffff; +		return PCI_BRIDGE_EMUL_HANDLED; + +	case PCI_ERR_UNCOR_STATUS: +	case PCI_ERR_UNCOR_MASK: +	case PCI_ERR_UNCOR_SEVER: +	case PCI_ERR_COR_STATUS: +	case PCI_ERR_COR_MASK: +	case PCI_ERR_CAP: +	case PCI_ERR_HEADER_LOG + 0: +	case PCI_ERR_HEADER_LOG + 4: +	case PCI_ERR_HEADER_LOG + 8: +	case PCI_ERR_HEADER_LOG + 12: +	case PCI_ERR_ROOT_COMMAND: +	case PCI_ERR_ROOT_STATUS: +	case PCI_ERR_ROOT_ERR_SRC: +		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg); +		return PCI_BRIDGE_EMUL_HANDLED; + +	default: +		return PCI_BRIDGE_EMUL_NOT_HANDLED; +	} +} + +static void +advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge, +				    int reg, u32 old, u32 new, u32 mask) +{ +	struct advk_pcie *pcie = bridge->data; + +	switch (reg) { +	/* These are W1C registers, so clear other bits */ +	case PCI_ERR_UNCOR_STATUS: +	case PCI_ERR_COR_STATUS: +	case PCI_ERR_ROOT_STATUS: +		new &= mask; +		fallthrough; + +	case PCI_ERR_UNCOR_MASK: +	case PCI_ERR_UNCOR_SEVER: +	case PCI_ERR_COR_MASK: +	case PCI_ERR_CAP: +	case PCI_ERR_HEADER_LOG + 0: +	case PCI_ERR_HEADER_LOG + 4: +	case PCI_ERR_HEADER_LOG + 8: +	case PCI_ERR_HEADER_LOG + 12: +	case PCI_ERR_ROOT_COMMAND: +	case PCI_ERR_ROOT_ERR_SRC: +		advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg); +		break; + +	default: +		break; +	} +} +  static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {  	.read_base = advk_pci_bridge_emul_base_conf_read,  	.write_base = advk_pci_bridge_emul_base_conf_write,  	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,  	.write_pcie = advk_pci_bridge_emul_pcie_conf_write, +	.read_ext = advk_pci_bridge_emul_ext_conf_read, +	.write_ext = advk_pci_bridge_emul_ext_conf_write,  };  /* @@ -977,8 +1054,25 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)  	/* Support interrupt A for MSI feature */  	bridge->conf.intpin = PCI_INTERRUPT_INTA; -	/* Aardvark HW provides PCIe Capability structure in version 2 */ -	bridge->pcie_conf.cap = cpu_to_le16(2); +	/* +	 * Aardvark HW provides PCIe Capability structure in version 2 and +	 * indicate slot support, which is emulated. +	 */ +	bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT); + +	/* +	 * Set Presence Detect State bit permanently since there is no support +	 * for unplugging the card nor detecting whether it is plugged. (If a +	 * platform exists in the future that supports it, via a GPIO for +	 * example, it should be implemented via this bit.) +	 * +	 * Set physical slot number to 1 since there is only one port and zero +	 * value is reserved for ports within the same silicon as Root Port +	 * which is not our case. 
+	 */ +	bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN, +							   1)); +	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);  	/* Indicates supports for Completion Retry Status */  	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 50a8e1d6f70a..05c50408f13b 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -9,6 +9,8 @@  #include <linux/of_pci.h>  #include <linux/pci.h>  #include <linux/pci_ids.h> +#include <linux/pci-acpi.h> +#include <linux/pci-ecam.h>  #include "../pci.h" @@ -18,18 +20,31 @@  #define DEV_PCIE_PORT_2	0x7a29  #define DEV_LS2K_APB	0x7a02 -#define DEV_LS7A_CONF	0x7a10 +#define DEV_LS7A_GMAC	0x7a03 +#define DEV_LS7A_DC1	0x7a06  #define DEV_LS7A_LPC	0x7a0c +#define DEV_LS7A_AHCI	0x7a08 +#define DEV_LS7A_CONF	0x7a10 +#define DEV_LS7A_GNET	0x7a13 +#define DEV_LS7A_EHCI	0x7a14 +#define DEV_LS7A_DC2	0x7a36 +#define DEV_LS7A_HDMI	0x7a37  #define FLAG_CFG0	BIT(0)  #define FLAG_CFG1	BIT(1)  #define FLAG_DEV_FIX	BIT(2) +#define FLAG_DEV_HIDDEN	BIT(3) + +struct loongson_pci_data { +	u32 flags; +	struct pci_ops *ops; +};  struct loongson_pci {  	void __iomem *cfg0_base;  	void __iomem *cfg1_base;  	struct platform_device *pdev; -	u32 flags; +	const struct loongson_pci_data *data;  };  /* Fixup wrong class code in PCIe bridges */ @@ -92,55 +107,106 @@ static void loongson_mrrs_quirk(struct pci_dev *dev)  }  DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk); -static void __iomem *cfg1_map(struct loongson_pci *priv, int bus, -				unsigned int devfn, int where) +static void loongson_pci_pin_quirk(struct pci_dev *pdev)  { -	unsigned long addroff = 0x0; +	pdev->pin = 1 + (PCI_FUNC(pdev->devfn) & 3); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_DC1, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_DC2, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_GMAC, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_AHCI, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_EHCI, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_GNET, loongson_pci_pin_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, +			DEV_LS7A_HDMI, loongson_pci_pin_quirk); + +static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) +{ +	struct pci_config_window *cfg; -	if (bus != 0) -		addroff |= BIT(28); /* Type 1 Access */ -	addroff |= (where & 0xff) | ((where & 0xf00) << 16); -	addroff |= (bus << 16) | (devfn << 8); -	return priv->cfg1_base + addroff; +	if (acpi_disabled) +		return (struct loongson_pci *)(bus->sysdata); + +	cfg = bus->sysdata; +	return (struct loongson_pci *)(cfg->priv);  } -static void __iomem *cfg0_map(struct loongson_pci *priv, int bus, -				unsigned int devfn, int where) +static void __iomem *cfg0_map(struct loongson_pci *priv, struct pci_bus *bus, +			      unsigned int devfn, int where)  {  	unsigned long addroff = 0x0; +	unsigned char busnum = bus->number; -	if (bus != 0) +	if (!pci_is_root_bus(bus)) {  		addroff |= BIT(24); /* Type 1 Access */ -	addroff |= (bus << 16) | (devfn << 8) | where; +		addroff |= (busnum << 16); +	} +	addroff |= (devfn << 8) | where;  	return priv->cfg0_base + addroff;  } -static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, unsigned int 
devfn, -			       int where) +static void __iomem *cfg1_map(struct loongson_pci *priv, struct pci_bus *bus, +			      unsigned int devfn, int where)  { +	unsigned long addroff = 0x0;  	unsigned char busnum = bus->number; -	struct pci_host_bridge *bridge = pci_find_host_bridge(bus); -	struct loongson_pci *priv =  pci_host_bridge_priv(bridge); + +	if (!pci_is_root_bus(bus)) { +		addroff |= BIT(28); /* Type 1 Access */ +		addroff |= (busnum << 16); +	} +	addroff |= (devfn << 8) | (where & 0xff) | ((where & 0xf00) << 16); +	return priv->cfg1_base + addroff; +} + +static bool pdev_may_exist(struct pci_bus *bus, unsigned int device, +			   unsigned int function) +{ +	return !(pci_is_root_bus(bus) && +		(device >= 9 && device <= 20) && (function > 0)); +} + +static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, +					  unsigned int devfn, int where) +{ +	unsigned int device = PCI_SLOT(devfn); +	unsigned int function = PCI_FUNC(devfn); +	struct loongson_pci *priv = pci_bus_to_loongson_pci(bus);  	/*  	 * Do not read more than one device on the bus other than -	 * the host bus. For our hardware the root bus is always bus 0. +	 * the host bus.  	 */ -	if (priv->flags & FLAG_DEV_FIX && busnum != 0 && -		PCI_SLOT(devfn) > 0) -		return NULL; +	if ((priv->data->flags & FLAG_DEV_FIX) && bus->self) { +		if (!pci_is_root_bus(bus) && (device > 0)) +			return NULL; +	} + +	/* Don't access non-existent devices */ +	if (priv->data->flags & FLAG_DEV_HIDDEN) { +		if (!pdev_may_exist(bus, device, function)) +			return NULL; +	}  	/* CFG0 can only access standard space */  	if (where < PCI_CFG_SPACE_SIZE && priv->cfg0_base) -		return cfg0_map(priv, busnum, devfn, where); +		return cfg0_map(priv, bus, devfn, where);  	/* CFG1 can access extended space */  	if (where < PCI_CFG_SPACE_EXP_SIZE && priv->cfg1_base) -		return cfg1_map(priv, busnum, devfn, where); +		return cfg1_map(priv, bus, devfn, where);  	return NULL;  } +#ifdef CONFIG_OF +  static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  {  	int irq; @@ -159,20 +225,42 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  	return val;  } -/* H/w only accept 32-bit PCI operations */ +/* LS2K/LS7A accept 8/16/32-bit PCI config operations */  static struct pci_ops loongson_pci_ops = {  	.map_bus = pci_loongson_map_bus, +	.read	= pci_generic_config_read, +	.write	= pci_generic_config_write, +}; + +/* RS780/SR5690 only accept 32-bit PCI config operations */ +static struct pci_ops loongson_pci_ops32 = { +	.map_bus = pci_loongson_map_bus,  	.read	= pci_generic_config_read32,  	.write	= pci_generic_config_write32,  }; +static const struct loongson_pci_data ls2k_pci_data = { +	.flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, +	.ops = &loongson_pci_ops, +}; + +static const struct loongson_pci_data ls7a_pci_data = { +	.flags = FLAG_CFG1 | FLAG_DEV_FIX | FLAG_DEV_HIDDEN, +	.ops = &loongson_pci_ops, +}; + +static const struct loongson_pci_data rs780e_pci_data = { +	.flags = FLAG_CFG0, +	.ops = &loongson_pci_ops32, +}; +  static const struct of_device_id loongson_pci_of_match[] = {  	{ .compatible = "loongson,ls2k-pci", -		.data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), }, +		.data = &ls2k_pci_data, },  	{ .compatible = "loongson,ls7a-pci", -		.data = (void *)(FLAG_CFG0 | FLAG_CFG1 | FLAG_DEV_FIX), }, +		.data = &ls7a_pci_data, },  	{ .compatible = "loongson,rs780e-pci", -		.data = (void *)(FLAG_CFG0), }, +		.data = &rs780e_pci_data, },  	{}  }; @@ -193,20 +281,20 @@ static int loongson_pci_probe(struct 
platform_device *pdev)  	priv = pci_host_bridge_priv(bridge);  	priv->pdev = pdev; -	priv->flags = (unsigned long)of_device_get_match_data(dev); +	priv->data = of_device_get_match_data(dev); -	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!regs) { -		dev_err(dev, "missing mem resources for cfg0\n"); -		return -EINVAL; +	if (priv->data->flags & FLAG_CFG0) { +		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); +		if (!regs) +			dev_err(dev, "missing mem resources for cfg0\n"); +		else { +			priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs); +			if (IS_ERR(priv->cfg0_base)) +				return PTR_ERR(priv->cfg0_base); +		}  	} -	priv->cfg0_base = devm_pci_remap_cfg_resource(dev, regs); -	if (IS_ERR(priv->cfg0_base)) -		return PTR_ERR(priv->cfg0_base); - -	/* CFG1 is optional */ -	if (priv->flags & FLAG_CFG1) { +	if (priv->data->flags & FLAG_CFG1) {  		regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);  		if (!regs)  			dev_info(dev, "missing mem resource for cfg1\n"); @@ -218,7 +306,7 @@ static int loongson_pci_probe(struct platform_device *pdev)  	}  	bridge->sysdata = priv; -	bridge->ops = &loongson_pci_ops; +	bridge->ops = priv->data->ops;  	bridge->map_irq = loongson_map_irq;  	return pci_host_probe(bridge); @@ -232,3 +320,41 @@ static struct platform_driver loongson_pci_driver = {  	.probe = loongson_pci_probe,  };  builtin_platform_driver(loongson_pci_driver); + +#endif + +#ifdef CONFIG_ACPI + +static int loongson_pci_ecam_init(struct pci_config_window *cfg) +{ +	struct device *dev = cfg->parent; +	struct loongson_pci *priv; +	struct loongson_pci_data *data; + +	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; + +	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); +	if (!data) +		return -ENOMEM; + +	cfg->priv = priv; +	data->flags = FLAG_CFG1 | FLAG_DEV_HIDDEN; +	priv->data = data; +	priv->cfg1_base = cfg->win - (cfg->busr.start << 16); + +	return 0; +} + +const struct pci_ecam_ops loongson_pci_ecam_ops = { +	.bus_shift = 16, +	.init	   = loongson_pci_ecam_init, +	.pci_ops   = { +		.map_bus = pci_loongson_map_bus, +		.read	 = pci_generic_config_read, +		.write	 = pci_generic_config_write, +	} +}; + +#endif diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index c1ffdb06c971..af915c951f06 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1216,7 +1216,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,  	return -ENOENT;  } -#ifdef CONFIG_PM_SLEEP  static int mvebu_pcie_suspend(struct device *dev)  {  	struct mvebu_pcie *pcie; @@ -1249,7 +1248,6 @@ static int mvebu_pcie_resume(struct device *dev)  	return 0;  } -#endif  static void mvebu_pcie_port_clk_put(void *data)  { @@ -1737,7 +1735,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {  };  static const struct dev_pm_ops mvebu_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)  };  static struct platform_driver mvebu_pcie_driver = { diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index 35804ea394fd..839695791757 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ b/drivers/pci/controller/pci-rcar-gen2.c @@ -328,6 +328,7 @@ static const struct of_device_id rcar_pci_of_match[] = {  	{ .compatible = "renesas,pci-r8a7791", },  	{ .compatible = "renesas,pci-r8a7794", },  	{ .compatible = "renesas,pci-rcar-gen2", }, +	{ 
.compatible = "renesas,pci-rzn1", },  	{ },  }; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 0457ec02ab70..8e323e93be91 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2707,7 +2707,7 @@ static int tegra_pcie_remove(struct platform_device *pdev)  	return 0;  } -static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) +static int tegra_pcie_pm_suspend(struct device *dev)  {  	struct tegra_pcie *pcie = dev_get_drvdata(dev);  	struct tegra_pcie_port *port; @@ -2742,7 +2742,7 @@ static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)  	return 0;  } -static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) +static int tegra_pcie_pm_resume(struct device *dev)  {  	struct tegra_pcie *pcie = dev_get_drvdata(dev);  	int err; @@ -2798,9 +2798,8 @@ poweroff:  }  static const struct dev_pm_ops tegra_pcie_pm_ops = { -	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, -				      tegra_pcie_pm_resume) +	RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume)  };  static struct platform_driver tegra_pcie_driver = { diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index eb6240958bb0..549d3bd6d1c2 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -641,7 +641,7 @@ static const struct of_device_id xgene_pcie_match_table[] = {  static struct platform_driver xgene_pcie_driver = {  	.driver = {  		.name = "xgene-pcie", -		.of_match_table = of_match_ptr(xgene_pcie_match_table), +		.of_match_table = xgene_pcie_match_table,  		.suppress_bind_attrs = true,  	},  	.probe = xgene_pcie_probe, diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e61058e13818..521acd632f1a 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -24,6 +24,7 @@  #include <linux/pci.h>  #include <linux/pci-ecam.h>  #include <linux/printk.h> +#include <linux/regulator/consumer.h>  #include <linux/reset.h>  #include <linux/sizes.h>  #include <linux/slab.h> @@ -190,11 +191,6 @@  /* Forward declarations */  struct brcm_pcie; -static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val); -static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);  enum {  	RGR1_SW_INIT_1, @@ -223,64 +219,9 @@ struct pcie_cfg_data {  	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);  }; -static const int pcie_offsets[] = { -	[RGR1_SW_INIT_1] = 0x9210, -	[EXT_CFG_INDEX]  = 0x9000, -	[EXT_CFG_DATA]   = 0x9004, -}; - -static const int pcie_offsets_bmips_7425[] = { -	[RGR1_SW_INIT_1] = 0x8010, -	[EXT_CFG_INDEX]  = 0x8300, -	[EXT_CFG_DATA]   = 0x8304, -}; - -static const struct pcie_cfg_data generic_cfg = { -	.offsets	= pcie_offsets, -	.type		= GENERIC, -	.perst_set	= brcm_pcie_perst_set_generic, -	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm7425_cfg = { -	.offsets	= pcie_offsets_bmips_7425, -	.type		= BCM7425, -	.perst_set	= brcm_pcie_perst_set_generic, -	
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm7435_cfg = { -	.offsets	= pcie_offsets, -	.type		= BCM7435, -	.perst_set	= brcm_pcie_perst_set_generic, -	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const struct pcie_cfg_data bcm4908_cfg = { -	.offsets	= pcie_offsets, -	.type		= BCM4908, -	.perst_set	= brcm_pcie_perst_set_4908, -	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, -}; - -static const int pcie_offset_bcm7278[] = { -	[RGR1_SW_INIT_1] = 0xc010, -	[EXT_CFG_INDEX] = 0x9000, -	[EXT_CFG_DATA] = 0x9004, -}; - -static const struct pcie_cfg_data bcm7278_cfg = { -	.offsets	= pcie_offset_bcm7278, -	.type		= BCM7278, -	.perst_set	= brcm_pcie_perst_set_7278, -	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, -}; - -static const struct pcie_cfg_data bcm2711_cfg = { -	.offsets	= pcie_offsets, -	.type		= BCM2711, -	.perst_set	= brcm_pcie_perst_set_generic, -	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +struct subdev_regulators { +	unsigned int num_supplies; +	struct regulator_bulk_data supplies[];  };  struct brcm_msi { @@ -320,6 +261,8 @@ struct brcm_pcie {  	u32			hw_rev;  	void			(*perst_set)(struct brcm_pcie *pcie, u32 val);  	void			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); +	struct subdev_regulators *sr; +	bool			ep_wakeup_capable;  };  static inline bool is_bmips(const struct brcm_pcie *pcie) @@ -741,52 +684,48 @@ static bool brcm_pcie_link_up(struct brcm_pcie *pcie)  	return dla && plu;  } -static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn, -					int where) +static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus, +				       unsigned int devfn, int where)  {  	struct brcm_pcie *pcie = bus->sysdata;  	void __iomem *base = pcie->base;  	int idx; -	/* Accesses to the RC go right to the RC registers if slot==0 */ +	/* Accesses to the RC go right to the RC registers if !devfn */  	if (pci_is_root_bus(bus)) -		return PCI_SLOT(devfn) ? NULL : base + where; +		return devfn ? NULL : base + PCIE_ECAM_REG(where); + +	/* An access to our HW w/o link-up will cause a CPU Abort */ +	if (!brcm_pcie_link_up(pcie)) +		return NULL;  	/* For devices, write to the config space index register */  	idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);  	writel(idx, pcie->base + PCIE_EXT_CFG_INDEX); -	return base + PCIE_EXT_CFG_DATA + where; +	return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);  } -static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn, -					 int where) +static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus, +					   unsigned int devfn, int where)  {  	struct brcm_pcie *pcie = bus->sysdata;  	void __iomem *base = pcie->base;  	int idx; -	/* Accesses to the RC go right to the RC registers if slot==0 */ +	/* Accesses to the RC go right to the RC registers if !devfn */  	if (pci_is_root_bus(bus)) -		return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3); +		return devfn ? 
NULL : base + PCIE_ECAM_REG(where); + +	/* An access to our HW w/o link-up will cause a CPU Abort */ +	if (!brcm_pcie_link_up(pcie)) +		return NULL;  	/* For devices, write to the config space index register */ -	idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3)); +	idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);  	writel(idx, base + IDX_ADDR(pcie));  	return base + DATA_ADDR(pcie);  } -static struct pci_ops brcm_pcie_ops = { -	.map_bus = brcm_pcie_map_conf, -	.read = pci_generic_config_read, -	.write = pci_generic_config_write, -}; - -static struct pci_ops brcm_pcie_ops32 = { -	.map_bus = brcm_pcie_map_conf32, -	.read = pci_generic_config_read32, -	.write = pci_generic_config_write32, -}; -  static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)  {  	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_GENERIC_MASK; @@ -926,17 +865,13 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,  static int brcm_pcie_setup(struct brcm_pcie *pcie)  { -	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);  	u64 rc_bar2_offset, rc_bar2_size;  	void __iomem *base = pcie->base; -	struct device *dev = pcie->dev; +	struct pci_host_bridge *bridge;  	struct resource_entry *entry; -	bool ssc_good = false; -	struct resource *res; -	int num_out_wins = 0; -	u16 nlw, cls, lnksta; -	int i, ret, memc;  	u32 tmp, burst, aspm_support; +	int num_out_wins = 0; +	int ret, memc;  	/* Reset the bridge */  	pcie->bridge_sw_init_set(pcie, 1); @@ -1012,6 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	else  		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB; +	if (!brcm_pcie_rc_mode(pcie)) { +		dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n"); +		return -EINVAL; +	} +  	/* disable the PCIe->GISB memory window (RC_BAR1) */  	tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);  	tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK; @@ -1022,31 +962,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;  	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); -	if (pcie->gen) -		brcm_pcie_set_gen(pcie, pcie->gen); - -	/* Unassert the fundamental reset */ -	pcie->perst_set(pcie, 0); +	/* Don't advertise L0s capability if 'aspm-no-l0s' */ +	aspm_support = PCIE_LINK_STATE_L1; +	if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) +		aspm_support |= PCIE_LINK_STATE_L0S; +	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); +	u32p_replace_bits(&tmp, aspm_support, +		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); +	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);  	/* -	 * Give the RC/EP time to wake up, before trying to configure RC. -	 * Intermittently check status for link-up, up to a total of 100ms. +	 * For config space accesses on the RC, show the right class for +	 * a PCIe-PCIe bridge (the default setting is to be EP mode).  	 
*/ -	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) -		msleep(5); - -	if (!brcm_pcie_link_up(pcie)) { -		dev_err(dev, "link down\n"); -		return -ENODEV; -	} - -	if (!brcm_pcie_rc_mode(pcie)) { -		dev_err(dev, "PCIe misconfigured; is in EP mode\n"); -		return -EINVAL; -	} +	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); +	u32p_replace_bits(&tmp, 0x060400, +			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); +	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); +	bridge = pci_host_bridge_from_priv(pcie);  	resource_list_for_each_entry(entry, &bridge->windows) { -		res = entry->res; +		struct resource *res = entry->res;  		if (resource_type(res) != IORESOURCE_MEM)  			continue; @@ -1075,23 +1011,41 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  		num_out_wins++;  	} -	/* Don't advertise L0s capability if 'aspm-no-l0s' */ -	aspm_support = PCIE_LINK_STATE_L1; -	if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) -		aspm_support |= PCIE_LINK_STATE_L0S; -	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); -	u32p_replace_bits(&tmp, aspm_support, -		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); -	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); +	/* PCIe->SCB endian mode for BAR */ +	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); +	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, +		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); +	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); + +	return 0; +} + +static int brcm_pcie_start_link(struct brcm_pcie *pcie) +{ +	struct device *dev = pcie->dev; +	void __iomem *base = pcie->base; +	u16 nlw, cls, lnksta; +	bool ssc_good = false; +	u32 tmp; +	int ret, i; + +	/* Unassert the fundamental reset */ +	pcie->perst_set(pcie, 0);  	/* -	 * For config space accesses on the RC, show the right class for -	 * a PCIe-PCIe bridge (the default setting is to be EP mode). +	 * Give the RC/EP time to wake up, before trying to configure RC. +	 * Intermittently check status for link-up, up to a total of 100ms.  	 */ -	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3); -	u32p_replace_bits(&tmp, 0x060400, -			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK); -	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3); +	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5) +		msleep(5); + +	if (!brcm_pcie_link_up(pcie)) { +		dev_err(dev, "link down\n"); +		return -ENODEV; +	} + +	if (pcie->gen) +		brcm_pcie_set_gen(pcie, pcie->gen);  	if (pcie->ssc) {  		ret = brcm_pcie_set_ssc(pcie); @@ -1108,12 +1062,6 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  		 pci_speed_string(pcie_link_speed[cls]), nlw,  		 ssc_good ? "(SSC)" : "(!SSC)"); -	/* PCIe->SCB endian mode for BAR */ -	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); -	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN, -		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK); -	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1); -  	/*  	 * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1  	 * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1. 
@@ -1125,6 +1073,82 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	return 0;  } +static const char * const supplies[] = { +	"vpcie3v3", +	"vpcie3v3aux", +	"vpcie12v", +}; + +static void *alloc_subdev_regulators(struct device *dev) +{ +	const size_t size = sizeof(struct subdev_regulators) + +		sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies); +	struct subdev_regulators *sr; +	int i; + +	sr = devm_kzalloc(dev, size, GFP_KERNEL); +	if (sr) { +		sr->num_supplies = ARRAY_SIZE(supplies); +		for (i = 0; i < ARRAY_SIZE(supplies); i++) +			sr->supplies[i].supply = supplies[i]; +	} + +	return sr; +} + +static int brcm_pcie_add_bus(struct pci_bus *bus) +{ +	struct brcm_pcie *pcie = bus->sysdata; +	struct device *dev = &bus->dev; +	struct subdev_regulators *sr; +	int ret; + +	if (!bus->parent || !pci_is_root_bus(bus->parent)) +		return 0; + +	if (dev->of_node) { +		sr = alloc_subdev_regulators(dev); +		if (!sr) { +			dev_info(dev, "Can't allocate regulators for downstream device\n"); +			goto no_regulators; +		} + +		pcie->sr = sr; + +		ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); +		if (ret) { +			dev_info(dev, "No regulators for downstream device\n"); +			goto no_regulators; +		} + +		ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); +		if (ret) { +			dev_err(dev, "Can't enable regulators for downstream device\n"); +			regulator_bulk_free(sr->num_supplies, sr->supplies); +			pcie->sr = NULL; +		} +	} + +no_regulators: +	brcm_pcie_start_link(pcie); +	return 0; +} + +static void brcm_pcie_remove_bus(struct pci_bus *bus) +{ +	struct brcm_pcie *pcie = bus->sysdata; +	struct subdev_regulators *sr = pcie->sr; +	struct device *dev = &bus->dev; + +	if (!sr) +		return; + +	if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) +		dev_err(dev, "Failed to disable regulators for downstream device\n"); +	regulator_bulk_free(sr->num_supplies, sr->supplies); +	pcie->sr = NULL; +} +  /* L23 is a low-power PCIe link state */  static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)  { @@ -1221,9 +1245,21 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)  	pcie->bridge_sw_init_set(pcie, 1);  } -static int brcm_pcie_suspend(struct device *dev) +static int pci_dev_may_wakeup(struct pci_dev *dev, void *data) +{ +	bool *ret = data; + +	if (device_may_wakeup(&dev->dev)) { +		*ret = true; +		dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n"); +	} +	return (int) *ret; +} + +static int brcm_pcie_suspend_noirq(struct device *dev)  {  	struct brcm_pcie *pcie = dev_get_drvdata(dev); +	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);  	int ret;  	brcm_pcie_turn_off(pcie); @@ -1241,12 +1277,31 @@ static int brcm_pcie_suspend(struct device *dev)  		return ret;  	} +	if (pcie->sr) { +		/* +		 * Now turn off the regulators, but if at least one +		 * downstream device is enabled as a wake-up source, do not +		 * turn off regulators. 
+		 */ +		pcie->ep_wakeup_capable = false; +		pci_walk_bus(bridge->bus, pci_dev_may_wakeup, +			     &pcie->ep_wakeup_capable); +		if (!pcie->ep_wakeup_capable) { +			ret = regulator_bulk_disable(pcie->sr->num_supplies, +						     pcie->sr->supplies); +			if (ret) { +				dev_err(dev, "Could not turn off regulators\n"); +				reset_control_reset(pcie->rescal); +				return ret; +			} +		} +	}  	clk_disable_unprepare(pcie->clk);  	return 0;  } -static int brcm_pcie_resume(struct device *dev) +static int brcm_pcie_resume_noirq(struct device *dev)  {  	struct brcm_pcie *pcie = dev_get_drvdata(dev);  	void __iomem *base; @@ -1281,11 +1336,37 @@ static int brcm_pcie_resume(struct device *dev)  	if (ret)  		goto err_reset; +	if (pcie->sr) { +		if (pcie->ep_wakeup_capable) { +			/* +			 * We are resuming from a suspend.  In the suspend we +			 * did not disable the power supplies, so there is +			 * no need to enable them (and falsely increase their +			 * usage count). +			 */ +			pcie->ep_wakeup_capable = false; +		} else { +			ret = regulator_bulk_enable(pcie->sr->num_supplies, +						    pcie->sr->supplies); +			if (ret) { +				dev_err(dev, "Could not turn on regulators\n"); +				goto err_reset; +			} +		} +	} + +	ret = brcm_pcie_start_link(pcie); +	if (ret) +		goto err_regulator; +  	if (pcie->msi)  		brcm_msi_set_regs(pcie->msi);  	return 0; +err_regulator: +	if (pcie->sr) +		regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);  err_reset:  	reset_control_rearm(pcie->rescal);  err_disable_clk: @@ -1316,6 +1397,66 @@ static int brcm_pcie_remove(struct platform_device *pdev)  	return 0;  } +static const int pcie_offsets[] = { +	[RGR1_SW_INIT_1] = 0x9210, +	[EXT_CFG_INDEX]  = 0x9000, +	[EXT_CFG_DATA]   = 0x9004, +}; + +static const int pcie_offsets_bmips_7425[] = { +	[RGR1_SW_INIT_1] = 0x8010, +	[EXT_CFG_INDEX]  = 0x8300, +	[EXT_CFG_DATA]   = 0x8304, +}; + +static const struct pcie_cfg_data generic_cfg = { +	.offsets	= pcie_offsets, +	.type		= GENERIC, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm7425_cfg = { +	.offsets	= pcie_offsets_bmips_7425, +	.type		= BCM7425, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm7435_cfg = { +	.offsets	= pcie_offsets, +	.type		= BCM7435, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const struct pcie_cfg_data bcm4908_cfg = { +	.offsets	= pcie_offsets, +	.type		= BCM4908, +	.perst_set	= brcm_pcie_perst_set_4908, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const int pcie_offset_bcm7278[] = { +	[RGR1_SW_INIT_1] = 0xc010, +	[EXT_CFG_INDEX] = 0x9000, +	[EXT_CFG_DATA] = 0x9004, +}; + +static const struct pcie_cfg_data bcm7278_cfg = { +	.offsets	= pcie_offset_bcm7278, +	.type		= BCM7278, +	.perst_set	= brcm_pcie_perst_set_7278, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, +}; + +static const struct pcie_cfg_data bcm2711_cfg = { +	.offsets	= pcie_offsets, +	.type		= BCM2711, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; +  static const struct of_device_id brcm_pcie_match[] = {  	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },  	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg }, @@ -1328,6 +1469,22 @@ static const struct of_device_id 
brcm_pcie_match[] = {  	{},  }; +static struct pci_ops brcm_pcie_ops = { +	.map_bus = brcm_pcie_map_bus, +	.read = pci_generic_config_read, +	.write = pci_generic_config_write, +	.add_bus = brcm_pcie_add_bus, +	.remove_bus = brcm_pcie_remove_bus, +}; + +static struct pci_ops brcm7425_pcie_ops = { +	.map_bus = brcm7425_pcie_map_bus, +	.read = pci_generic_config_read32, +	.write = pci_generic_config_write32, +	.add_bus = brcm_pcie_add_bus, +	.remove_bus = brcm_pcie_remove_bus, +}; +  static int brcm_pcie_probe(struct platform_device *pdev)  {  	struct device_node *np = pdev->dev.of_node, *msi_np; @@ -1414,12 +1571,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)  		}  	} -	bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops; +	bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;  	bridge->sysdata = pcie;  	platform_set_drvdata(pdev, pcie); -	return pci_host_probe(bridge); +	ret = pci_host_probe(bridge); +	if (!ret && !brcm_pcie_link_up(pcie)) +		ret = -ENODEV; + +	if (ret) { +		brcm_pcie_remove(pdev); +		return ret; +	} + +	return 0; +  fail:  	__brcm_pcie_remove(pcie);  	return ret; @@ -1428,8 +1595,8 @@ fail:  MODULE_DEVICE_TABLE(of, brcm_pcie_match);  static const struct dev_pm_ops brcm_pcie_pm_ops = { -	.suspend = brcm_pcie_suspend, -	.resume = brcm_pcie_resume, +	.suspend_noirq = brcm_pcie_suspend_noirq, +	.resume_noirq = brcm_pcie_resume_noirq,  };  static struct platform_driver brcm_pcie_driver = { diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 757b7fbcdc59..fee036b07cd4 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -589,8 +589,8 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)  		msi->has_inten_reg = true;  	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; -	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), -				   sizeof(*msi->bitmap), GFP_KERNEL); +	msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs, +					 GFP_KERNEL);  	if (!msi->bitmap)  		return -ENOMEM; diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 5d9fd36b02d1..11cdb9b6f109 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -153,6 +153,37 @@ struct mtk_gen3_pcie {  	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);  }; +/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */ +static const char *const ltssm_str[] = { +	"detect.quiet",			/* 0x00 */ +	"detect.active",		/* 0x01 */ +	"polling.active",		/* 0x02 */ +	"polling.compliance",		/* 0x03 */ +	"polling.configuration",	/* 0x04 */ +	"config.linkwidthstart",	/* 0x05 */ +	"config.linkwidthaccept",	/* 0x06 */ +	"config.lanenumwait",		/* 0x07 */ +	"config.lanenumaccept",		/* 0x08 */ +	"config.complete",		/* 0x09 */ +	"config.idle",			/* 0x0A */ +	"recovery.receiverlock",	/* 0x0B */ +	"recovery.equalization",	/* 0x0C */ +	"recovery.speed",		/* 0x0D */ +	"recovery.receiverconfig",	/* 0x0E */ +	"recovery.idle",		/* 0x0F */ +	"L0",				/* 0x10 */ +	"L0s",				/* 0x11 */ +	"L1.entry",			/* 0x12 */ +	"L1.idle",			/* 0x13 */ +	"L2.idle",			/* 0x14 */ +	"L2.transmitwake",		/* 0x15 */ +	"disable",			/* 0x16 */ +	"loopback.entry",		/* 0x17 */ +	"loopback.active",		/* 0x18 */ +	"loopback.exit",		/* 0x19 */ +	"hotreset",			/* 0x1A */ +}; +  /**   * mtk_pcie_config_tlp_header() - Configure a configuration TLP header   * @bus: PCI bus to query @@ -327,8 +358,16 @@ 
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)  				 !!(val & PCIE_PORT_LINKUP), 20,  				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);  	if (err) { +		const char *ltssm_state; +		int ltssm_index; +  		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); -		dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val); +		ltssm_index = PCIE_LTSSM_STATE(val); +		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ? +			      "Unknown state" : ltssm_str[ltssm_index]; +		dev_err(pcie->dev, +			"PCIe link down, current LTSSM state: %s (%#x)\n", +			ltssm_state, val);  		return err;  	} @@ -600,7 +639,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)  						  &intx_domain_ops, pcie);  	if (!pcie->intx_domain) {  		dev_err(dev, "failed to create INTx IRQ domain\n"); -		return -ENODEV; +		ret = -ENODEV; +		goto out_put_node;  	}  	/* Setup MSI */ @@ -623,13 +663,15 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)  		goto err_msi_domain;  	} +	of_node_put(intc_node);  	return 0;  err_msi_domain:  	irq_domain_remove(pcie->msi_bottom_domain);  err_msi_bottom_domain:  	irq_domain_remove(pcie->intx_domain); - +out_put_node: +	of_node_put(intc_node);  	return ret;  } @@ -917,7 +959,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)  	return 0;  } -static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) +static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)  {  	int i; @@ -935,7 +977,7 @@ static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)  	raw_spin_unlock(&pcie->irq_lock);  } -static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) +static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)  {  	int i; @@ -953,7 +995,7 @@ static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)  	raw_spin_unlock(&pcie->irq_lock);  } -static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) +static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)  {  	u32 val; @@ -968,7 +1010,7 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)  				   50 * USEC_PER_MSEC);  } -static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +static int mtk_pcie_suspend_noirq(struct device *dev)  {  	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);  	int err; @@ -994,7 +1036,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)  	return 0;  } -static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +static int mtk_pcie_resume_noirq(struct device *dev)  {  	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);  	int err; @@ -1015,8 +1057,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)  }  static const struct dev_pm_ops mtk_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, -				      mtk_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, +				  mtk_pcie_resume_noirq)  };  static const struct of_device_id mtk_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index be8bd919cb88..ae5ad05ddc1d 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -1150,7 +1150,7 @@ static int mtk_pcie_remove(struct platform_device *pdev)  	return 0;  } -static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +static int mtk_pcie_suspend_noirq(struct device *dev)  {  	struct mtk_pcie *pcie = dev_get_drvdata(dev);  	struct mtk_pcie_port *port; @@ 
-1174,7 +1174,7 @@ static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)  	return 0;  } -static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +static int mtk_pcie_resume_noirq(struct device *dev)  {  	struct mtk_pcie *pcie = dev_get_drvdata(dev);  	struct mtk_pcie_port *port, *tmp; @@ -1195,8 +1195,8 @@ static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)  }  static const struct dev_pm_ops mtk_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, -				      mtk_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, +				  mtk_pcie_resume_noirq)  };  static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c index dd5dba419047..7263d175b5ad 100644 --- a/drivers/pci/controller/pcie-microchip-host.c +++ b/drivers/pci/controller/pcie-microchip-host.c @@ -904,6 +904,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)  						   &event_domain_ops, port);  	if (!port->event_domain) {  		dev_err(dev, "failed to get event domain\n"); +		of_node_put(pcie_intc_node);  		return -ENOMEM;  	} @@ -913,6 +914,7 @@ static int mc_pcie_init_irq_domains(struct mc_pcie *port)  						  &intx_domain_ops, port);  	if (!port->intx_domain) {  		dev_err(dev, "failed to get an INTx IRQ domain\n"); +		of_node_put(pcie_intc_node);  		return -ENOMEM;  	} diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index 997c4df6a1e7..e4faf90feaf5 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -1072,7 +1072,7 @@ err_pm_put:  	return err;  } -static int __maybe_unused rcar_pcie_resume(struct device *dev) +static int rcar_pcie_resume(struct device *dev)  {  	struct rcar_pcie_host *host = dev_get_drvdata(dev);  	struct rcar_pcie *pcie = &host->pcie; @@ -1127,7 +1127,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)  }  static const struct dev_pm_ops rcar_pcie_pm_ops = { -	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume) +	SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)  	.resume_noirq = rcar_pcie_resume_noirq,  }; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 7f56f99b4116..7352b5ff8d35 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -864,7 +864,7 @@ static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)  	return 0;  } -static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) +static int rockchip_pcie_suspend_noirq(struct device *dev)  {  	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);  	int ret; @@ -889,7 +889,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)  	return ret;  } -static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) +static int rockchip_pcie_resume_noirq(struct device *dev)  {  	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);  	int err; @@ -1035,8 +1035,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)  }  static const struct dev_pm_ops rockchip_pcie_pm_ops = { -	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, -				      rockchip_pcie_resume_noirq) +	NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, +				  rockchip_pcie_resume_noirq)  };  static const struct of_device_id rockchip_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c 
b/drivers/pci/controller/pcie-xilinx-cpm.c index c7cd44ed4dfc..e4ab48041eb6 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -35,6 +35,10 @@  #define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348  #define XILINX_CPM_PCIE_MISC_IR_LOCAL	BIT(1) +#define XILINX_CPM_PCIE_IR_STATUS       0x000002A0 +#define XILINX_CPM_PCIE_IR_ENABLE       0x000002A8 +#define XILINX_CPM_PCIE_IR_LOCAL        BIT(0) +  /* Interrupt registers definitions */  #define XILINX_CPM_PCIE_INTR_LINK_DOWN		0  #define XILINX_CPM_PCIE_INTR_HOT_RESET		3 @@ -98,6 +102,19 @@  /* Phy Status/Control Register definitions */  #define XILINX_CPM_PCIE_REG_PSCR_LNKUP		BIT(11) +enum xilinx_cpm_version { +	CPM, +	CPM5, +}; + +/** + * struct xilinx_cpm_variant - CPM variant information + * @version: CPM version + */ +struct xilinx_cpm_variant { +	enum xilinx_cpm_version version; +}; +  /**   * struct xilinx_cpm_pcie - PCIe port information   * @dev: Device pointer @@ -109,6 +126,7 @@   * @intx_irq: legacy interrupt number   * @irq: Error interrupt number   * @lock: lock protecting shared register access + * @variant: CPM version check pointer   */  struct xilinx_cpm_pcie {  	struct device			*dev; @@ -120,6 +138,7 @@ struct xilinx_cpm_pcie {  	int				intx_irq;  	int				irq;  	raw_spinlock_t			lock; +	const struct xilinx_cpm_variant   *variant;  };  static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg) @@ -285,6 +304,13 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)  		generic_handle_domain_irq(port->cpm_domain, i);  	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); +	if (port->variant->version == CPM5) { +		val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS); +		if (val) +			writel_relaxed(val, port->cpm_base + +					    XILINX_CPM_PCIE_IR_STATUS); +	} +  	/*  	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to  	 * CPM SLCR block. 
@@ -484,6 +510,12 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)  	 */  	writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,  	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); + +	if (port->variant->version == CPM5) { +		writel(XILINX_CPM_PCIE_IR_LOCAL, +		       port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE); +	} +  	/* Enable the Bridge enable bit */  	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |  		   XILINX_CPM_PCIE_REG_RPSC_BEN, @@ -518,7 +550,14 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,  	if (IS_ERR(port->cfg))  		return PTR_ERR(port->cfg); -	port->reg_base = port->cfg->win; +	if (port->variant->version == CPM5) { +		port->reg_base = devm_platform_ioremap_resource_byname(pdev, +								    "cpm_csr"); +		if (IS_ERR(port->reg_base)) +			return PTR_ERR(port->reg_base); +	} else { +		port->reg_base = port->cfg->win; +	}  	return 0;  } @@ -559,6 +598,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)  	if (!bus)  		return -ENODEV; +	port->variant = of_device_get_match_data(dev); +  	err = xilinx_cpm_pcie_parse_dt(port, bus->res);  	if (err) {  		dev_err(dev, "Parsing DT failed\n"); @@ -591,8 +632,23 @@ err_parse_dt:  	return err;  } +static const struct xilinx_cpm_variant cpm_host = { +	.version = CPM, +}; + +static const struct xilinx_cpm_variant cpm5_host = { +	.version = CPM5, +}; +  static const struct of_device_id xilinx_cpm_pcie_of_match[] = { -	{ .compatible = "xlnx,versal-cpm-host-1.00", }, +	{ +		.compatible = "xlnx,versal-cpm-host-1.00", +		.data = &cpm_host, +	}, +	{ +		.compatible = "xlnx,versal-cpm5-host", +		.data = &cpm5_host, +	},  	{}  }; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 94a14a3d7e55..e06e9f4fc50f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -898,7 +898,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)  	if (vmd->instance < 0)  		return vmd->instance; -	vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance); +	vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d", +				   vmd->instance);  	if (!vmd->name) {  		err = -ENOMEM;  		goto out_release_instance; @@ -936,7 +937,6 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)   out_release_instance:  	ida_simple_remove(&vmd_instance_ida, vmd->instance); -	kfree(vmd->name);  	return err;  } @@ -959,7 +959,6 @@ static void vmd_remove(struct pci_dev *dev)  	vmd_detach_resources(vmd);  	vmd_remove_irq_domain(vmd);  	ida_simple_remove(&vmd_instance_ida, vmd->instance); -	kfree(vmd->name);  }  #ifdef CONFIG_PM_SLEEP @@ -1013,6 +1012,14 @@ static const struct pci_device_id vmd_ids[] = {  		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |  				VMD_FEAT_HAS_BUS_RESTRICTIONS |  				VMD_FEAT_OFFSET_FIRST_VECTOR,}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b), +		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | +				VMD_FEAT_HAS_BUS_RESTRICTIONS | +				VMD_FEAT_OFFSET_FIRST_VECTOR,}, +	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b), +		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | +				VMD_FEAT_HAS_BUS_RESTRICTIONS | +				VMD_FEAT_OFFSET_FIRST_VECTOR,},  	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),  		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |  				VMD_FEAT_HAS_BUS_RESTRICTIONS | diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 5b833f00e980..36b1801a061b 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ 
b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -52,9 +52,11 @@ struct pci_epf_test {  	enum pci_barno		test_reg_bar;  	size_t			msix_table_offset;  	struct delayed_work	cmd_handler; -	struct dma_chan		*dma_chan; +	struct dma_chan		*dma_chan_tx; +	struct dma_chan		*dma_chan_rx;  	struct completion	transfer_complete;  	bool			dma_supported; +	bool			dma_private;  	const struct pci_epc_features *epc_features;  }; @@ -96,6 +98,8 @@ static void pci_epf_test_dma_callback(void *param)   * @dma_src: The source address of the data transfer. It can be a physical   *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.   * @len: The size of the data transfer + * @dma_remote: remote RC physical address + * @dir: DMA transfer direction   *   * Function that uses dmaengine API to transfer data between PCIe EP and remote   * PCIe RC. The source and destination address can be a physical address given @@ -105,12 +109,16 @@ static void pci_epf_test_dma_callback(void *param)   */  static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,  				      dma_addr_t dma_dst, dma_addr_t dma_src, -				      size_t len) +				      size_t len, dma_addr_t dma_remote, +				      enum dma_transfer_direction dir)  { +	struct dma_chan *chan = (dir == DMA_DEV_TO_MEM) ? +				 epf_test->dma_chan_tx : epf_test->dma_chan_rx; +	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;  	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; -	struct dma_chan *chan = epf_test->dma_chan;  	struct pci_epf *epf = epf_test->epf;  	struct dma_async_tx_descriptor *tx; +	struct dma_slave_config sconf = {};  	struct device *dev = &epf->dev;  	dma_cookie_t cookie;  	int ret; @@ -120,7 +128,24 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,  		return -EINVAL;  	} -	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); +	if (epf_test->dma_private) { +		sconf.direction = dir; +		if (dir == DMA_MEM_TO_DEV) +			sconf.dst_addr = dma_remote; +		else +			sconf.src_addr = dma_remote; + +		if (dmaengine_slave_config(chan, &sconf)) { +			dev_err(dev, "DMA slave config fail\n"); +			return -EIO; +		} +		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir, +						 flags); +	} else { +		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, +					       flags); +	} +  	if (!tx) {  		dev_err(dev, "Failed to prepare DMA memcpy\n");  		return -EIO; @@ -148,6 +173,23 @@ static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,  	return 0;  } +struct epf_dma_filter { +	struct device *dev; +	u32 dma_mask; +}; + +static bool epf_dma_filter_fn(struct dma_chan *chan, void *node) +{ +	struct epf_dma_filter *filter = node; +	struct dma_slave_caps caps; + +	memset(&caps, 0, sizeof(caps)); +	dma_get_slave_caps(chan, &caps); + +	return chan->device->dev == filter->dev +		&& (filter->dma_mask & caps.directions); +} +  /**   * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel   * @epf_test: the EPF test device that performs data transfer operation @@ -158,10 +200,44 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)  {  	struct pci_epf *epf = epf_test->epf;  	struct device *dev = &epf->dev; +	struct epf_dma_filter filter;  	struct dma_chan *dma_chan;  	dma_cap_mask_t mask;  	int ret; +	filter.dev = epf->epc->dev.parent; +	filter.dma_mask = BIT(DMA_DEV_TO_MEM); + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); +	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); +	if (!dma_chan) { +		dev_info(dev, 
"Failed to get private DMA rx channel. Falling back to generic one\n"); +		goto fail_back_tx; +	} + +	epf_test->dma_chan_rx = dma_chan; + +	filter.dma_mask = BIT(DMA_MEM_TO_DEV); +	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter); + +	if (!dma_chan) { +		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n"); +		goto fail_back_rx; +	} + +	epf_test->dma_chan_tx = dma_chan; +	epf_test->dma_private = true; + +	init_completion(&epf_test->transfer_complete); + +	return 0; + +fail_back_rx: +	dma_release_channel(epf_test->dma_chan_rx); +	epf_test->dma_chan_tx = NULL; + +fail_back_tx:  	dma_cap_zero(mask);  	dma_cap_set(DMA_MEMCPY, mask); @@ -174,7 +250,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)  	}  	init_completion(&epf_test->transfer_complete); -	epf_test->dma_chan = dma_chan; +	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;  	return 0;  } @@ -190,8 +266,17 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)  	if (!epf_test->dma_supported)  		return; -	dma_release_channel(epf_test->dma_chan); -	epf_test->dma_chan = NULL; +	dma_release_channel(epf_test->dma_chan_tx); +	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) { +		epf_test->dma_chan_tx = NULL; +		epf_test->dma_chan_rx = NULL; +		return; +	} + +	dma_release_channel(epf_test->dma_chan_rx); +	epf_test->dma_chan_rx = NULL; + +	return;  }  static void pci_epf_test_print_rate(const char *ops, u64 size, @@ -280,8 +365,15 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test)  			goto err_map_addr;  		} +		if (epf_test->dma_private) { +			dev_err(dev, "Cannot transfer data using DMA\n"); +			ret = -EINVAL; +			goto err_map_addr; +		} +  		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, -						 src_phys_addr, reg->size); +						 src_phys_addr, reg->size, 0, +						 DMA_MEM_TO_MEM);  		if (ret)  			dev_err(dev, "Data transfer failed\n");  	} else { @@ -373,7 +465,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)  		ktime_get_ts64(&start);  		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, -						 phys_addr, reg->size); +						 phys_addr, reg->size, +						 reg->src_addr, DMA_DEV_TO_MEM);  		if (ret)  			dev_err(dev, "Data transfer failed\n");  		ktime_get_ts64(&end); @@ -463,8 +556,11 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)  		}  		ktime_get_ts64(&start); +  		ret = pci_epf_test_data_transfer(epf_test, phys_addr, -						 src_phys_addr, reg->size); +						 src_phys_addr, reg->size, +						 reg->dst_addr, +						 DMA_MEM_TO_DEV);  		if (ret)  			dev_err(dev, "Data transfer failed\n");  		ktime_get_ts64(&end); @@ -627,7 +723,6 @@ static void pci_epf_test_unbind(struct pci_epf *epf)  	cancel_delayed_work(&epf_test->cmd_handler);  	pci_epf_test_clean_dma_chan(epf_test); -	pci_epc_stop(epc);  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {  		epf_bar = &epf->bar[bar]; diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c index b8c9011987f4..4504039056d1 100644 --- a/drivers/pci/mmap.c +++ b/drivers/pci/mmap.c @@ -13,27 +13,6 @@  #ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE -/* - * Modern setup: generic pci_mmap_resource_range(), and implement the legacy - * pci_mmap_page_range() (if needed) as a wrapper round it. 
- */ - -#ifdef HAVE_PCI_MMAP -int pci_mmap_page_range(struct pci_dev *pdev, int bar, -			struct vm_area_struct *vma, -			enum pci_mmap_state mmap_state, int write_combine) -{ -	resource_size_t start, end; - -	pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); - -	/* Adjust vm_pgoff to be the offset within the resource */ -	vma->vm_pgoff -= start >> PAGE_SHIFT; -	return pci_mmap_resource_range(pdev, bar, vma, mmap_state, -				       write_combine); -} -#endif -  static const struct vm_operations_struct pci_phys_vm_ops = {  #ifdef CONFIG_HAVE_IOREMAP_PROT  	.access = generic_access_phys, @@ -70,27 +49,4 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,  				  vma->vm_page_prot);  } -#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */ - -/* - * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around - * the architecture's pci_mmap_page_range(), converting to "user visible" - * addresses as necessary. - */ - -int pci_mmap_resource_range(struct pci_dev *pdev, int bar, -			    struct vm_area_struct *vma, -			    enum pci_mmap_state mmap_state, int write_combine) -{ -	resource_size_t start, end; - -	/* -	 * pci_mmap_page_range() expects the same kind of entry as coming -	 * from /proc/bus/pci/ which is a "user visible" value. If this is -	 * different from the resource itself, arch will do necessary fixup. -	 */ -	pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); -	vma->vm_pgoff += start >> PAGE_SHIFT; -	return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine); -}  #endif diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 3760d85c10d2..a46fec776ad7 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -21,8 +21,9 @@  #include "pci.h"  /* - * The GUID is defined in the PCI Firmware Specification available here: - * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf + * The GUID is defined in the PCI Firmware Specification available + * here to PCI-SIG members: + * https://members.pcisig.com/wg/PCI-SIG/document/15350   */  const guid_t pci_acpi_dsm_guid =  	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index cfaf40a540a8..95bc329e74c0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -41,8 +41,10 @@ const char *pci_power_names[] = {  };  EXPORT_SYMBOL_GPL(pci_power_names); +#ifdef CONFIG_X86_32  int isa_dma_bridge_buggy;  EXPORT_SYMBOL(isa_dma_bridge_buggy); +#endif  int pci_pci_problems;  EXPORT_SYMBOL(pci_pci_problems); @@ -1293,9 +1295,6 @@ static int pci_set_full_power_state(struct pci_dev *dev)  		pci_restore_bars(dev);  	} -	if (dev->bus->self) -		pcie_aspm_pm_state_change(dev->bus->self); -  	return 0;  } @@ -1390,9 +1389,6 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)  				     pci_power_name(dev->current_state),  				     pci_power_name(state)); -	if (dev->bus->self) -		pcie_aspm_pm_state_change(dev->bus->self); -  	return 0;  } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index e10cdec6c56e..785f31086313 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -560,12 +560,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);  #ifdef CONFIG_PCIEASPM  void pcie_aspm_init_link_state(struct pci_dev *pdev);  void pcie_aspm_exit_link_state(struct pci_dev *pdev); -void pcie_aspm_pm_state_change(struct pci_dev *pdev);  void pcie_aspm_powersave_config_link(struct pci_dev *pdev);  #else  static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }  
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } -static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }  static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }  #endif diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 7952e5efd6cf..e2d8a74f83c3 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -392,6 +392,11 @@ void pci_aer_init(struct pci_dev *dev)  	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);  	pci_aer_clear_status(dev); + +	if (pci_aer_available()) +		pci_enable_pcie_error_reporting(dev); + +	pcie_set_ecrc_checking(dev);  }  void pci_aer_exit(struct pci_dev *dev) @@ -538,7 +543,7 @@ static const char *aer_agent_string[] = {  	u64 *stats = pdev->aer_stats->stats_array;			\  	size_t len = 0;							\  									\ -	for (i = 0; i < ARRAY_SIZE(strings_array); i++) {		\ +	for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\  		if (strings_array[i])					\  			len += sysfs_emit_at(buf, len, "%s %llu\n",	\  					     strings_array[i],		\ @@ -1228,9 +1233,6 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data)  			pci_disable_pcie_error_reporting(dev);  	} -	if (enable) -		pcie_set_ecrc_checking(dev); -  	return 0;  } @@ -1347,6 +1349,11 @@ static int aer_probe(struct pcie_device *dev)  	struct device *device = &dev->device;  	struct pci_dev *port = dev->port; +	BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) < +		     AER_MAX_TYPEOF_COR_ERRS); +	BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) < +		     AER_MAX_TYPEOF_UNCOR_ERRS); +  	/* Limit to Root Ports or Root Complex Event Collectors */  	if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&  	    (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT)) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index a96b7424c9bc..a8aec190986c 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1012,25 +1012,6 @@ out:  	up_read(&pci_bus_sem);  } -/* @pdev: the root port or switch downstream port */ -void pcie_aspm_pm_state_change(struct pci_dev *pdev) -{ -	struct pcie_link_state *link = pdev->link_state; - -	if (aspm_disabled || !link) -		return; -	/* -	 * Devices changed PM state, we should recheck if latency -	 * meets all functions' requirement -	 */ -	down_read(&pci_bus_sem); -	mutex_lock(&aspm_lock); -	pcie_update_aspm_capable(link->root); -	pcie_config_aspm_path(link); -	mutex_unlock(&aspm_lock); -	up_read(&pci_bus_sem); -} -  void pcie_aspm_powersave_config_link(struct pci_dev *pdev)  {  	struct pcie_link_state *link = pdev->link_state; @@ -1366,4 +1347,3 @@ bool pcie_aspm_support_enabled(void)  {  	return aspm_support_enabled;  } -EXPORT_SYMBOL(pcie_aspm_support_enabled); diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 0c5a143025af..59c90d04a609 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -55,10 +55,14 @@ static int report_error_detected(struct pci_dev *dev,  	device_lock(&dev->dev);  	pdrv = dev->driver; -	if (!pci_dev_set_io_state(dev, state) || -		!pdrv || -		!pdrv->err_handler || -		!pdrv->err_handler->error_detected) { +	if (pci_dev_is_disconnected(dev)) { +		vote = PCI_ERS_RESULT_DISCONNECT; +	} else if (!pci_dev_set_io_state(dev, state)) { +		pci_info(dev, "can't recover (state transition %u -> %u invalid)\n", +			dev->error_state, state); +		vote = PCI_ERS_RESULT_NONE; +	} else if (!pdrv || !pdrv->err_handler || +		   !pdrv->err_handler->error_detected) {  		/*  		 * If any device in the subtree does not 
have an error_detected  		 * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 604feeb84ee4..1ac7fec47d6f 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -222,15 +222,8 @@ static int get_port_device_capability(struct pci_dev *dev)  #ifdef CONFIG_PCIEAER  	if (dev->aer_cap && pci_aer_available() && -	    (pcie_ports_native || host->native_aer)) { +	    (pcie_ports_native || host->native_aer))  		services |= PCIE_PORT_SERVICE_AER; - -		/* -		 * Disable AER on this port in case it's been enabled by the -		 * BIOS (the AER service driver will enable it when necessary). -		 */ -		pci_disable_pcie_error_reporting(dev); -	}  #endif  	/* Root Ports and Root Complex Event Collectors may generate PMEs */ diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 17a969942d37..9884d8b29d3b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1890,6 +1890,9 @@ int pci_setup_device(struct pci_dev *dev)  	dev->broken_intx_masking = pci_intx_mask_broken(dev); +	/* Clear errors left from system firmware */ +	pci_write_config_word(dev, PCI_STATUS, 0xffff); +  	switch (dev->hdr_type) {		    /* header type */  	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */  		if (class == PCI_CLASS_BRIDGE_PCI) @@ -2579,33 +2582,39 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)  }  EXPORT_SYMBOL(pci_scan_single_device); -static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev, -			    unsigned int fn) +static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)  {  	int pos;  	u16 cap = 0;  	unsigned int next_fn; -	if (pci_ari_enabled(bus)) { -		if (!dev) -			return 0; -		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); -		if (!pos) -			return 0; +	if (!dev) +		return -ENODEV; -		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap); -		next_fn = PCI_ARI_CAP_NFN(cap); -		if (next_fn <= fn) -			return 0;	/* protect against malformed list */ +	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); +	if (!pos) +		return -ENODEV; -		return next_fn; -	} +	pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap); +	next_fn = PCI_ARI_CAP_NFN(cap); +	if (next_fn <= fn) +		return -ENODEV;	/* protect against malformed list */ -	/* dev may be NULL for non-contiguous multifunction devices */ -	if (!dev || dev->multifunction) -		return (fn + 1) % 8; +	return next_fn; +} -	return 0; +static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn) +{ +	if (pci_ari_enabled(bus)) +		return next_ari_fn(bus, dev, fn); + +	if (fn >= 7) +		return -ENODEV; +	/* only multifunction devices may have more functions */ +	if (dev && !dev->multifunction) +		return -ENODEV; + +	return fn + 1;  }  static int only_one_child(struct pci_bus *bus) @@ -2643,26 +2652,30 @@ static int only_one_child(struct pci_bus *bus)   */  int pci_scan_slot(struct pci_bus *bus, int devfn)  { -	unsigned int fn, nr = 0;  	struct pci_dev *dev; +	int fn = 0, nr = 0;  	if (only_one_child(bus) && (devfn > 0))  		return 0; /* Already scanned the entire slot */ -	dev = pci_scan_single_device(bus, devfn); -	if (!dev) -		return 0; -	if (!pci_dev_is_added(dev)) -		nr++; - -	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) { +	do {  		dev = pci_scan_single_device(bus, devfn + fn);  		if (dev) {  			if (!pci_dev_is_added(dev))  				nr++; -			dev->multifunction = 1; +			if (fn > 0) +				dev->multifunction = 1; +		} else if (fn == 0) { +			/* +			 * Function 0 is 
required unless we are running on +			 * a hypervisor that passes through individual PCI +			 * functions. +			 */ +			if (!hypervisor_isolated_pci_functions()) +				break;  		} -	} +		fn = next_fn(bus, dev, fn); +	} while (fn >= 0);  	/* Only one slot has PCIe device */  	if (bus->self && nr) @@ -2858,29 +2871,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,  {  	unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;  	unsigned int start = bus->busn_res.start; -	unsigned int devfn, fn, cmax, max = start; +	unsigned int devfn, cmax, max = start;  	struct pci_dev *dev; -	int nr_devs;  	dev_dbg(&bus->dev, "scanning bus\n");  	/* Go find them, Rover! */ -	for (devfn = 0; devfn < 256; devfn += 8) { -		nr_devs = pci_scan_slot(bus, devfn); - -		/* -		 * The Jailhouse hypervisor may pass individual functions of a -		 * multi-function device to a guest without passing function 0. -		 * Look for them as well. -		 */ -		if (jailhouse_paravirt() && nr_devs == 0) { -			for (fn = 1; fn < 8; fn++) { -				dev = pci_scan_single_device(bus, devfn + fn); -				if (dev) -					dev->multifunction = 1; -			} -		} -	} +	for (devfn = 0; devfn < 256; devfn += 8) +		pci_scan_slot(bus, devfn);  	/* Reserve buses for SR-IOV capability */  	used_buses = pci_iov_bus_range(bus); diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 31b26d8ea6cc..f967709082d6 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -244,6 +244,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)  {  	struct pci_dev *dev = pde_data(file_inode(file));  	struct pci_filp_private *fpriv = file->private_data; +	resource_size_t start, end;  	int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;  	if (!capable(CAP_SYS_RAWIO) || @@ -278,7 +279,11 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)  	    iomem_is_exclusive(dev->resource[i].start))  		return -EINVAL; -	ret = pci_mmap_page_range(dev, i, vma, +	pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); + +	/* Adjust vm_pgoff to be the offset within the resource */ +	vma->vm_pgoff -= start >> PAGE_SHIFT; +	ret = pci_mmap_resource_range(dev, i, vma,  				  fpriv->mmap_state, write_combine);  	if (ret < 0)  		return ret; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 41aeaa235132..4944798e75b5 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -17,6 +17,7 @@  #include <linux/kernel.h>  #include <linux/export.h>  #include <linux/pci.h> +#include <linux/isa-dma.h> /* isa_dma_bridge_buggy */  #include <linux/init.h>  #include <linux/delay.h>  #include <linux/acpi.h> @@ -30,7 +31,6 @@  #include <linux/pm_runtime.h>  #include <linux/suspend.h>  #include <linux/switchtec.h> -#include <asm/dma.h>	/* isa_dma_bridge_buggy */  #include "pci.h"  static ktime_t fixup_debug_start(struct pci_dev *dev, @@ -239,6 +239,7 @@ static void quirk_passive_release(struct pci_dev *dev)  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);  DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release); +#ifdef CONFIG_X86_32  /*   * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a   * workaround but VIA don't answer queries. 
If you happen to have good @@ -265,6 +266,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1533,		quirk_isa_dma  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_1,	quirk_isa_dma_hangs);  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_2,	quirk_isa_dma_hangs);  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_3,	quirk_isa_dma_hangs); +#endif  /*   * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear @@ -2709,10 +2711,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,  			nvenet_msi_disable);  /* - * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled, - * then the device can't use INTx interrupts. Tegra's PCIe root ports don't - * generate MSI interrupts for PME and AER events instead only INTx interrupts - * are generated. Though Tegra's PCIe root ports can generate MSI interrupts + * PCIe spec r6.0 sec 6.1.4.3 says that if MSI/MSI-X is enabled, the device + * can't use INTx interrupts. Tegra's PCIe Root Ports don't generate MSI + * interrupts for PME and AER events; instead only INTx interrupts are + * generated. Though Tegra's PCIe Root Ports can generate MSI interrupts   * for other events, since PCIe specification doesn't support using a mix of   * INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port   * service drivers registering their respective ISRs for MSIs. @@ -2760,6 +2762,15 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,  DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,  			      PCI_CLASS_BRIDGE_PCI, 8,  			      pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229a, +			      PCI_CLASS_BRIDGE_PCI, 8, +			      pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229c, +			      PCI_CLASS_BRIDGE_PCI, 8, +			      pci_quirk_nvidia_tegra_disable_rp_msi); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229e, +			      PCI_CLASS_BRIDGE_PCI, 8, +			      pci_quirk_nvidia_tegra_disable_rp_msi);  /*   * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing @@ -4924,6 +4935,9 @@ static const struct pci_dev_acs_enabled {  	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },  	/* Broadcom multi-function device */  	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, +	{ PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, +	{ PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs }, +	{ PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },  	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },  	/* Amazon Annapurna Labs */  	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index c36c1238c604..75be4fe22509 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1376,8 +1376,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)  	dev->groups = switchtec_device_groups;  	dev->release = stdev_release; -	minor = ida_simple_get(&switchtec_minor_ida, 0, 0, -			       GFP_KERNEL); +	minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);  	if (minor < 0) {  		rc = minor;  		goto err_put; @@ -1692,7 +1691,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,  err_devadd:  	stdev_kill(stdev);  err_put: -	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); +	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));  	put_device(&stdev->dev);  	return rc;  } 
@@ -1704,7 +1703,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)  	pci_set_drvdata(pdev, NULL);  	cdev_device_del(&stdev->cdev, &stdev->dev); -	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); +	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));  	dev_info(&stdev->dev, "unregistered.\n");  	stdev_kill(stdev);  	put_device(&stdev->dev); diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c index 578cfe07d07a..53c9230c2907 100644 --- a/drivers/phy/samsung/phy-exynos-pcie.c +++ b/drivers/phy/samsung/phy-exynos-pcie.c @@ -51,6 +51,13 @@ static int exynos5433_pcie_phy_init(struct phy *phy)  {  	struct exynos_pcie_phy *ep = phy_get_drvdata(phy); +	regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET, +			   BIT(0), 1); +	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET, +			   PCIE_APP_REQ_EXIT_L1_MODE, 0); +	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON, +			   PCIE_REFCLK_GATING_EN, 0); +  	regmap_update_bits(ep->fsysreg,	PCIE_EXYNOS5433_PHY_COMMON_RESET,  			   PCIE_PHY_RESET, 1);  	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET, @@ -109,20 +116,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy)  	return 0;  } -static int exynos5433_pcie_phy_power_on(struct phy *phy) -{ -	struct exynos_pcie_phy *ep = phy_get_drvdata(phy); - -	regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET, -			   BIT(0), 1); -	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET, -			   PCIE_APP_REQ_EXIT_L1_MODE, 0); -	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON, -			   PCIE_REFCLK_GATING_EN, 0); -	return 0; -} - -static int exynos5433_pcie_phy_power_off(struct phy *phy) +static int exynos5433_pcie_phy_exit(struct phy *phy)  {  	struct exynos_pcie_phy *ep = phy_get_drvdata(phy); @@ -135,8 +129,7 @@ static int exynos5433_pcie_phy_power_off(struct phy *phy)  static const struct phy_ops exynos5433_phy_ops = {  	.init		= exynos5433_pcie_phy_init, -	.power_on	= exynos5433_pcie_phy_power_on, -	.power_off	= exynos5433_pcie_phy_power_off, +	.exit		= exynos5433_pcie_phy_exit,  	.owner		= THIS_MODULE,  }; diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index 2fa0f7d55259..8f7695624c8c 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -17,6 +17,7 @@  #include <asm/dma.h>  #include <asm/irq.h>  #include <linux/pci.h> +#include <linux/libata.h>  #include <linux/ioport.h>  #include <linux/init.h> @@ -322,8 +323,8 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,  		 * treat the compatibility IRQs as busy.  		 
*/  		if ((progif & 0x5) != 0x5) -			if (pci_get_legacy_ide_irq(pci, 0) == irq || -			    pci_get_legacy_ide_irq(pci, 1) == irq) { +			if (ATA_PRIMARY_IRQ(pci) == irq || +			    ATA_SECONDARY_IRQ(pci) == irq) {  				pnp_dbg(&pnp->dev, "  legacy IDE device %s "  					"using irq %d\n", pci_name(pci), irq);  				return 1; diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 6bb3cd3d695a..6869f1061528 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -1,17 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/asm-generic/pci.h - * - *  Copyright (C) 2003 Russell King - */ -#ifndef _ASM_GENERIC_PCI_H -#define _ASM_GENERIC_PCI_H +/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +#ifndef __ASM_GENERIC_PCI_H +#define __ASM_GENERIC_PCI_H + +#ifndef PCIBIOS_MIN_IO +#define PCIBIOS_MIN_IO		0 +#endif + +#ifndef PCIBIOS_MIN_MEM +#define PCIBIOS_MIN_MEM		0 +#endif + +#ifndef pcibios_assign_all_busses +/* For bootloaders that do not initialize the PCI bus */ +#define pcibios_assign_all_busses() 1 +#endif + +/* Enable generic resource mapping code in drivers/pci/ */ +#define ARCH_GENERIC_PCI_MMAP_RESOURCE + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus)  { -	return channel ? 15 : 14; +	/* always show the domain in /proc */ +	return 1;  } -#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ +#endif /* CONFIG_PCI_DOMAINS */ -#endif /* _ASM_GENERIC_PCI_H */ +#endif /* __ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index 5a2f9bf53384..8fbb0a55545d 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);  #ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP  extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,  				      unsigned int nr); +#elif !defined(CONFIG_HAS_IOPORT_MAP) +#define __pci_ioport_map(dev, port, nr) NULL  #else  #define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))  #endif diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h index cab6e18773da..7d8062e9c544 100644 --- a/include/linux/dma/edma.h +++ b/include/linux/dma/edma.h @@ -12,19 +12,74 @@  #include <linux/device.h>  #include <linux/dmaengine.h> +#define EDMA_MAX_WR_CH                                  8 +#define EDMA_MAX_RD_CH                                  8 +  struct dw_edma; +struct dw_edma_region { +	phys_addr_t	paddr; +	void __iomem	*vaddr; +	size_t		sz; +}; + +struct dw_edma_core_ops { +	int (*irq_vector)(struct device *dev, unsigned int nr); +}; + +enum dw_edma_map_format { +	EDMA_MF_EDMA_LEGACY = 0x0, +	EDMA_MF_EDMA_UNROLL = 0x1, +	EDMA_MF_HDMA_COMPAT = 0x5 +}; + +/** + * enum dw_edma_chip_flags - Flags specific to an eDMA chip + * @DW_EDMA_CHIP_LOCAL:		eDMA is used locally by an endpoint + */ +enum dw_edma_chip_flags { +	DW_EDMA_CHIP_LOCAL	= BIT(0), +}; +  /**   * struct dw_edma_chip - representation of DesignWare eDMA controller hardware   * @dev:		 struct device of the eDMA controller   * @id:			 instance ID - * @irq:		 irq line - * @dw:			 struct dw_edma that is filed by dw_edma_probe() + * @nr_irqs:		 total number of DMA IRQs + * @ops			 DMA channel to IRQ number mapping + * @flags		 dw_edma_chip_flags + * @reg_base		 DMA register base address + * @ll_wr_cnt		 DMA write link list count + * @ll_rd_cnt		 DMA read link list count + 
* @rg_region		 DMA register region + * @ll_region_wr	 DMA descriptor link list memory for write channel + * @ll_region_rd	 DMA descriptor link list memory for read channel + * @dt_region_wr	 DMA data memory for write channel + * @dt_region_rd	 DMA data memory for read channel + * @mf			 DMA register map format + * @dw:			 struct dw_edma that is filled by dw_edma_probe()   */  struct dw_edma_chip {  	struct device		*dev;  	int			id; -	int			irq; +	int			nr_irqs; +	const struct dw_edma_core_ops   *ops; +	u32			flags; + +	void __iomem		*reg_base; + +	u16			ll_wr_cnt; +	u16			ll_rd_cnt; +	/* link list address */ +	struct dw_edma_region	ll_region_wr[EDMA_MAX_WR_CH]; +	struct dw_edma_region	ll_region_rd[EDMA_MAX_RD_CH]; + +	/* data region */ +	struct dw_edma_region	dt_region_wr[EDMA_MAX_WR_CH]; +	struct dw_edma_region	dt_region_rd[EDMA_MAX_RD_CH]; + +	enum dw_edma_map_format	mf; +  	struct dw_edma		*dw;  }; diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index fc08b433c856..9efbc54e35e5 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -32,4 +32,12 @@ static inline bool jailhouse_paravirt(void)  #endif /* !CONFIG_X86 */ +static inline bool hypervisor_isolated_pci_functions(void) +{ +	if (IS_ENABLED(CONFIG_S390)) +		return true; + +	return jailhouse_paravirt(); +} +  #endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h new file mode 100644 index 000000000000..61504a8c1b9e --- /dev/null +++ b/include/linux/isa-dma.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_ISA_DMA_H +#define __LINUX_ISA_DMA_H + +#include <asm/dma.h> + +#if defined(CONFIG_PCI) && defined(CONFIG_X86_32) +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy	(0) +#endif + +#endif /* __LINUX_ISA_DMA_H */ diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index adea5a4771cf..6b1301e2498e 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -87,6 +87,7 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 *  extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */  extern const struct pci_ecam_ops al_pcie_ops;	/* Amazon Annapurna Labs PCIe */  extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ +extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */  #endif  #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci.h b/include/linux/pci.h index 81a57b498f22..060af91bafcd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1909,24 +1909,14 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,  #include <asm/pci.h> -/* These two functions provide almost identical functionality. Depending - * on the architecture, one will be implemented as a wrapper around the - * other (in drivers/pci/mmap.c). - * +/*   * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff   * is expected to be an offset within that region.   * - * pci_mmap_page_range() is the legacy architecture-specific interface, - * which accepts a "user visible" resource address converted by - * pci_resource_to_user(), as used in the legacy mmap() interface in - * /proc/bus/pci/.   
*/  int pci_mmap_resource_range(struct pci_dev *dev, int bar,  			    struct vm_area_struct *vma,  			    enum pci_mmap_state mmap_state, int write_combine); -int pci_mmap_page_range(struct pci_dev *pdev, int bar, -			struct vm_area_struct *vma, -			enum pci_mmap_state mmap_state, int write_combine);  #ifndef arch_can_pci_mmap_wc  #define arch_can_pci_mmap_wc()		0 diff --git a/sound/core/isadma.c b/sound/core/isadma.c index 1f45ede023b4..18a86212e3a8 100644 --- a/sound/core/isadma.c +++ b/sound/core/isadma.c @@ -12,8 +12,8 @@  #undef HAVE_REALLY_SLOW_DMA_CONTROLLER  #include <linux/export.h> +#include <linux/isa-dma.h>  #include <sound/core.h> -#include <asm/dma.h>  /**   * snd_dma_program - program an ISA DMA transfer  | 
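
A note on the pci-epf-test changes above: the test function now tries to grab dedicated TX and RX channels with a dmaengine filter before falling back to a generic DMA_MEMCPY channel. A minimal sketch of that request pattern follows; the filter struct and epf_dma_filter_fn() are modeled on the driver but simplified, and request_private_rx_chan() is a made-up wrapper for illustration only.

#include <linux/bits.h>
#include <linux/dmaengine.h>

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;		/* BIT(DMA_DEV_TO_MEM) or BIT(DMA_MEM_TO_DEV) */
};

static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps = {};

	dma_get_slave_caps(chan, &caps);

	/* Channel must belong to our DMA device and support the direction */
	return chan->device->dev == filter->dev &&
	       (filter->dma_mask & caps.directions);
}

static struct dma_chan *request_private_rx_chan(struct device *dma_dev)
{
	struct epf_dma_filter filter = {
		.dev		= dma_dev,
		.dma_mask	= BIT(DMA_DEV_TO_MEM),
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* May return NULL; the caller then falls back to a DMA_MEMCPY channel */
	return dma_request_channel(mask, epf_dma_filter_fn, &filter);
}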
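
The switchtec hunks swap the deprecated ida_simple_get()/ida_simple_remove() helpers for ida_alloc()/ida_free(). The conversion is mechanical; here is a standalone sketch (the example_* wrappers are hypothetical, the IDA name is taken from the diff).

#include <linux/idr.h>

static DEFINE_IDA(switchtec_minor_ida);

static int example_get_minor(void)
{
	/* Equivalent to the old ida_simple_get(&ida, 0, 0, GFP_KERNEL) */
	return ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
}

static void example_put_minor(unsigned int minor)
{
	/* Equivalent to the old ida_simple_remove() */
	ida_free(&switchtec_minor_ida, minor);
}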
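
isa_dma_bridge_buggy is now declared in the new <linux/isa-dma.h> and evaluates to a compile-time 0 unless both CONFIG_PCI and CONFIG_X86_32 are set, which is why sound/core/isadma.c and quirks.c switch includes. A hypothetical user would look like this (example_program_isa_dma() is illustrative only):

#include <linux/isa-dma.h>
#include <linux/printk.h>

static void example_program_isa_dma(void)
{
	/*
	 * On anything other than PCI + x86-32 builds this is the constant 0,
	 * so the workaround branch is discarded by the compiler.
	 */
	if (isa_dma_bridge_buggy)
		pr_warn("working around buggy ISA DMA bridge\n");

	/* ... program the 8237 via the helpers from <asm/dma.h> ... */
}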
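
Finally, the expanded struct dw_edma_chip means a glue driver now hands the register base, IRQ count, channel counts, link-list/data regions and register map format straight to the eDMA core. A rough sketch, assuming dw_edma_probe() still takes only the chip pointer and leaving the per-channel region setup out; example_edma_attach() and its parameters are illustrative:

#include <linux/device.h>
#include <linux/dma/edma.h>

static int example_edma_attach(struct device *dev, void __iomem *regs,
			       const struct dw_edma_core_ops *ops)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->nr_irqs = 1;
	chip->ops = ops;			/* channel to IRQ vector mapping */
	chip->flags = DW_EDMA_CHIP_LOCAL;	/* eDMA used by the endpoint itself */
	chip->reg_base = regs;
	chip->mf = EDMA_MF_HDMA_COMPAT;
	chip->ll_wr_cnt = 1;
	chip->ll_rd_cnt = 1;

	/* ll_region_wr/rd[] and dt_region_wr/rd[] would be filled in here */

	return dw_edma_probe(chip);
}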
