870 files changed, 28333 insertions, 10792 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd index 2a19584d091e..8c9718d83e9d 100644 --- a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd +++ b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd @@ -1,6 +1,6 @@ What: /sys/bus/hid/drivers/hid-appletb-kbd/<dev>/mode -Date: September, 2023 -KernelVersion: 6.5 +Date: March, 2025 +KernelVersion: 6.15 Contact: linux-input@vger.kernel.org Description: The set of keys displayed on the Touch Bar. diff --git a/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml b/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml new file mode 100644 index 000000000000..69eb29dc4d7b --- /dev/null +++ b/Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/aeonsemi,as21xxx.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Aeonsemi AS21XXX Ethernet PHY + +maintainers: + - Christian Marangi <ansuelsmth@gmail.com> + +description: | + Aeonsemi AS21xxx Ethernet PHYs requires a firmware to be loaded to actually + work. The same firmware is compatible with various PHYs of the same family. + + A PHY with not firmware loaded will be exposed on the MDIO bus with ID + 0x7500 0x7500 or 0x7500 0x9410 on C45 registers. + + This can be done and is implemented by OEM in 2 different way: + - Attached SPI flash directly to the PHY with the firmware. The PHY + will self load the firmware in the presence of this configuration. + - Manually provided firmware loaded from a file in the filesystem. + + Each PHY can support up to 5 LEDs. + + AS2xxx PHY Name logic: + + AS21x1xxB1 + ^ ^^ + | |J: Supports SyncE/PTP + | |P: No SyncE/PTP support + | 1: Supports 2nd Serdes + | 2: Not 2nd Serdes support + 0: 10G, 5G, 2.5G + 5: 5G, 2.5G + 2: 2.5G + +allOf: + - $ref: ethernet-phy.yaml# + +select: + properties: + compatible: + contains: + enum: + - ethernet-phy-id7500.9410 + - ethernet-phy-id7500.9402 + - ethernet-phy-id7500.9412 + - ethernet-phy-id7500.9422 + - ethernet-phy-id7500.9432 + - ethernet-phy-id7500.9442 + - ethernet-phy-id7500.9452 + - ethernet-phy-id7500.9462 + - ethernet-phy-id7500.9472 + - ethernet-phy-id7500.9482 + - ethernet-phy-id7500.9492 + required: + - compatible + +properties: + reg: + maxItems: 1 + + firmware-name: + description: specify the name of PHY firmware to load + maxItems: 1 + +required: + - compatible + - reg + +if: + properties: + compatible: + contains: + const: ethernet-phy-id7500.9410 +then: + required: + - firmware-name +else: + properties: + firmware-name: false + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/leds/common.h> + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-phy@1f { + compatible = "ethernet-phy-id7500.9410", + "ethernet-phy-ieee802.3-c45"; + + reg = <31>; + firmware-name = "as21x1x_fw.bin"; + + leds { + #address-cells = <1>; + #size-cells = <0>; + + led@0 { + reg = <0>; + color = <LED_COLOR_ID_GREEN>; + function = LED_FUNCTION_LAN; + function-enumerator = <0>; + default-state = "keep"; + }; + + led@1 { + reg = <1>; + color = <LED_COLOR_ID_GREEN>; + function = LED_FUNCTION_LAN; + function-enumerator = <1>; + default-state = "keep"; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml index 0fdd11265417..6d22131ac2f9 100644 --- 
a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml +++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml @@ -57,6 +57,16 @@ properties: - const: hsi-mac - const: xfp-mac + memory-region: + items: + - description: QDMA0 buffer memory + - description: QDMA1 buffer memory + + memory-region-names: + items: + - const: qdma0-buf + - const: qdma1-buf + "#address-cells": const: 1 @@ -140,6 +150,9 @@ examples: <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>; + memory-region = <&qdma0_buf>, <&qdma1_buf>; + memory-region-names = "qdma0-buf", "qdma1-buf"; + airoha,npu = <&npu>; #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml index d02e9dd847ef..3ab60c70286f 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml @@ -48,6 +48,18 @@ properties: description: The GPIO number of the NXP chipset used for BT_WAKE_IN. + interrupts: + maxItems: 1 + description: + Host wakeup by falling edge interrupt on this pin which is + connected to BT_WAKE_OUT pin of the NXP chipset. + + interrupt-names: + items: + - const: wakeup + + wakeup-source: true + nxp,wakeout-pin: $ref: /schemas/types.yaml#/definitions/uint8 description: @@ -61,6 +73,7 @@ unevaluatedProperties: false examples: - | #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> serial { bluetooth { compatible = "nxp,88w8987-bt"; @@ -70,5 +83,9 @@ examples: nxp,wakein-pin = /bits/ 8 <18>; nxp,wakeout-pin = /bits/ 8 <19>; local-bd-address = [66 55 44 33 22 11]; + interrupt-parent = <&gpio>; + interrupts = <8 IRQ_TYPE_EDGE_FALLING>; + interrupt-names = "wakeup"; + wakeup-source; }; }; diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml index e0ec53bc10c6..1525a50ded47 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp2510.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/can/microchip,mcp2510.yaml# +$id: http://devicetree.org/schemas/net/can/microchip,mcp2510.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Microchip MCP251X stand-alone CAN controller diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml index f6884f6e59e7..f4ac21c68427 100644 --- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml +++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml @@ -42,19 +42,80 @@ properties: - renesas,r9a07g054-canfd # RZ/V2L - const: renesas,rzg2l-canfd # RZ/G2L family + - const: renesas,r9a09g047-canfd # RZ/G3E + reg: maxItems: 1 - interrupts: true + interrupts: + oneOf: + - items: + - description: Channel interrupt + - description: Global interrupt + - items: + - description: CAN global error interrupt + - description: CAN receive FIFO interrupt + - description: CAN0 error interrupt + - description: CAN0 transmit interrupt + - description: CAN0 transmit/receive FIFO receive completion interrupt + - description: CAN1 error interrupt + - description: CAN1 transmit interrupt + - description: CAN1 transmit/receive FIFO receive completion interrupt + - description: CAN2 error 
interrupt + - description: CAN2 transmit interrupt + - description: CAN2 transmit/receive FIFO receive completion interrupt + - description: CAN3 error interrupt + - description: CAN3 transmit interrupt + - description: CAN3 transmit/receive FIFO receive completion interrupt + - description: CAN4 error interrupt + - description: CAN4 transmit interrupt + - description: CAN4 transmit/receive FIFO receive completion interrupt + - description: CAN5 error interrupt + - description: CAN5 transmit interrupt + - description: CAN5 transmit/receive FIFO receive completion interrupt + minItems: 8 + + interrupt-names: + oneOf: + - items: + - const: ch_int + - const: g_int + - items: + - const: g_err + - const: g_recc + - const: ch0_err + - const: ch0_rec + - const: ch0_trx + - const: ch1_err + - const: ch1_rec + - const: ch1_trx + - const: ch2_err + - const: ch2_rec + - const: ch2_trx + - const: ch3_err + - const: ch3_rec + - const: ch3_trx + - const: ch4_err + - const: ch4_rec + - const: ch4_trx + - const: ch5_err + - const: ch5_rec + - const: ch5_trx + minItems: 8 clocks: maxItems: 3 clock-names: - items: - - const: fck - - const: canfd - - const: can_clk + oneOf: + - items: + - const: fck + - const: canfd + - const: can_clk + - items: + - const: fck + - const: ram_clk + - const: can_clk power-domains: maxItems: 1 @@ -117,48 +178,38 @@ allOf: then: properties: interrupts: - items: - - description: CAN global error interrupt - - description: CAN receive FIFO interrupt - - description: CAN0 error interrupt - - description: CAN0 transmit interrupt - - description: CAN0 transmit/receive FIFO receive completion interrupt - - description: CAN1 error interrupt - - description: CAN1 transmit interrupt - - description: CAN1 transmit/receive FIFO receive completion interrupt + maxItems: 8 interrupt-names: - items: - - const: g_err - - const: g_recc - - const: ch0_err - - const: ch0_rec - - const: ch0_trx - - const: ch1_err - - const: ch1_rec - - const: ch1_trx + maxItems: 8 resets: + minItems: 2 maxItems: 2 reset-names: - items: - - const: rstp_n - - const: rstc_n + minItems: 2 + maxItems: 2 required: - reset-names - else: + + - if: + properties: + compatible: + contains: + enum: + - renesas,rcar-gen3-canfd + - renesas,rcar-gen4-canfd + then: properties: interrupts: - items: - - description: Channel interrupt - - description: Global interrupt + minItems: 2 + maxItems: 2 interrupt-names: - items: - - const: ch_int - - const: g_int + minItems: 2 + maxItems: 2 resets: maxItems: 1 @@ -167,20 +218,54 @@ allOf: properties: compatible: contains: + const: renesas,r9a09g047-canfd + then: + properties: + interrupts: + minItems: 20 + + interrupt-names: + minItems: 20 + + resets: + minItems: 2 + maxItems: 2 + + reset-names: + minItems: 2 + maxItems: 2 + + required: + - reset-names + + - if: + properties: + compatible: + contains: + enum: + - renesas,rcar-gen3-canfd + - renesas,rzg2l-canfd + then: + patternProperties: + "^channel[2-7]$": false + + - if: + properties: + compatible: + contains: const: renesas,r8a779h0-canfd then: patternProperties: "^channel[4-7]$": false - else: - if: - not: - properties: - compatible: - contains: - const: renesas,rcar-gen4-canfd - then: - patternProperties: - "^channel[2-7]$": false + + - if: + properties: + compatible: + contains: + const: renesas,r9a09g047-canfd + then: + patternProperties: + "^channel[6-7]$": false unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml 
index 9e557cb838c7..dc68dd59988f 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml @@ -21,6 +21,12 @@ properties: reg: maxItems: 1 + firmware-name: + maxItems: 1 + description: + If present, a board or platform specific string used to lookup + usecase-specific firmware files for the device. + vddaon-supply: description: VDD_AON supply regulator handle diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index c650cd3dcb80..9f98715a6512 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -98,6 +98,24 @@ definitions: name: tcp-data-split type: enum entries: [ unknown, disabled, enabled ] + - + name: hwtstamp-source + doc: Source of the hardware timestamp + enum-name: hwtstamp-source + name-prefix: hwtstamp-source- + type: enum + entries: + - + name: netdev + doc: | + Hardware timestamp comes from a MAC or a device + which has MAC and PHY integrated + value: 1 + - + name: phylib + doc: | + Hardware timestamp comes from one PHY device + of the network topology attribute-sets: - @@ -896,6 +914,13 @@ attribute-sets: name: hwtstamp-provider type: nest nested-attributes: ts-hwtstamp-provider + - + name: hwtstamp-source + type: u32 + enum: hwtstamp-source + - + name: hwtstamp-phyindex + type: u32 - name: cable-result attr-cnt-name: __ethtool-a-cable-result-cnt @@ -1981,6 +2006,8 @@ operations: - phc-index - stats - hwtstamp-provider + - hwtstamp-source + - hwtstamp-phyindex dump: *tsinfo-get-op - name: cable-test-act diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml index 953aa837958b..cb7ea7d62e56 100644 --- a/Documentation/netlink/specs/tc.yaml +++ b/Documentation/netlink/specs/tc.yaml @@ -2,6 +2,7 @@ name: tc protocol: netlink-raw +uapi-header: linux/pkt_cls.h protonum: 0 doc: @@ -12,6 +13,7 @@ definitions: - name: tcmsg type: struct + header: linux/rtnetlink.h members: - name: family @@ -33,7 +35,8 @@ definitions: name: info type: u32 - - name: tc-cls-flags + name: cls-flags + enum-name: type: flags entries: - skip-hw @@ -42,7 +45,9 @@ definitions: - not-in-nw - verbose - - name: tc-flower-key-ctrl-flags + name: flower-key-ctrl-flags + name-prefix: tca-flower-key-flags- + enum-name: type: flags entries: - frag @@ -630,6 +635,7 @@ definitions: - name: tc-ratespec type: struct + header: linux/pkt_sched.h members: - name: cell-log @@ -1186,7 +1192,7 @@ definitions: name: firstuse type: u64 - - name: tc-gen + name: tc-gact type: struct members: - @@ -1377,7 +1383,8 @@ definitions: type: s32 attribute-sets: - - name: tc-attrs + name: attrs + name-prefix: tca- attributes: - name: kind @@ -1385,7 +1392,7 @@ attribute-sets: - name: options type: sub-message - sub-message: tc-options-msg + sub-message: options-msg selector: kind - name: stats @@ -1436,7 +1443,8 @@ attribute-sets: name: ext-warn-msg type: string - - name: tc-act-attrs + name: act-attrs + name-prefix: tca-act- attributes: - name: kind @@ -1444,7 +1452,7 @@ attribute-sets: - name: options type: sub-message - sub-message: tc-act-options-msg + sub-message: act-options-msg selector: kind - name: index @@ -1452,7 +1460,7 @@ attribute-sets: - name: stats type: nest - nested-attributes: tc-act-stats-attrs + nested-attributes: tca-stats-attrs - name: pad type: pad @@ -1472,39 +1480,9 @@ attribute-sets: name: in-hw-count type: u32 - - name: tc-act-stats-attrs - attributes: - - - name: basic - type: binary - struct: 
gnet-stats-basic - - - name: rate-est - type: binary - struct: gnet-stats-rate-est - - - name: queue - type: binary - struct: gnet-stats-queue - - - name: app - type: binary - - - name: rate-est64 - type: binary - struct: gnet-stats-rate-est64 - - - name: pad - type: pad - - - name: basic-hw - type: binary - struct: gnet-stats-basic - - - name: pkt64 - type: u64 - - - name: tc-act-bpf-attrs + name: act-bpf-attrs + name-prefix: tca-act-bpf- + header: linux/tc_act/tc_bpf.h attributes: - name: tm @@ -1535,7 +1513,9 @@ attribute-sets: name: id type: binary - - name: tc-act-connmark-attrs + name: act-connmark-attrs + name-prefix: tca-connmark- + header: linux/tc_act/tc_connmark.h attributes: - name: parms @@ -1548,7 +1528,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-csum-attrs + name: act-csum-attrs + name-prefix: tca-csum- + header: linux/tc_act/tc_csum.h attributes: - name: parms @@ -1561,7 +1543,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-ct-attrs + name: act-ct-attrs + name-prefix: tca-ct- + header: linux/tc_act/tc_ct.h attributes: - name: parms @@ -1623,7 +1607,9 @@ attribute-sets: name: helper-proto type: u8 - - name: tc-act-ctinfo-attrs + name: act-ctinfo-attrs + name-prefix: tca-ctinfo- + header: linux/tc_act/tc_ctinfo.h attributes: - name: pad @@ -1657,7 +1643,9 @@ attribute-sets: name: stats-cpmark-set type: u64 - - name: tc-act-gate-attrs + name: act-gate-attrs + name-prefix: tca-gate- + header: linux/tc_act/tc_gate.h attributes: - name: tm @@ -1691,7 +1679,9 @@ attribute-sets: name: clockid type: s32 - - name: tc-act-ife-attrs + name: act-ife-attrs + name-prefix: tca-ife- + header: linux/tc_act/tc_ife.h attributes: - name: parms @@ -1716,7 +1706,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-mirred-attrs + name: act-mirred-attrs + name-prefix: tca-mirred- + header: linux/tc_act/tc_mirred.h attributes: - name: tm @@ -1732,7 +1724,9 @@ attribute-sets: name: blockid type: binary - - name: tc-act-mpls-attrs + name: act-mpls-attrs + name-prefix: tca-mpls- + header: linux/tc_act/tc_mpls.h attributes: - name: tm @@ -1762,7 +1756,9 @@ attribute-sets: name: bos type: u8 - - name: tc-act-nat-attrs + name: act-nat-attrs + name-prefix: tca-nat- + header: linux/tc_act/tc_nat.h attributes: - name: parms @@ -1775,7 +1771,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-pedit-attrs + name: act-pedit-attrs + name-prefix: tca-pedit- + header: linux/tc_act/tc_pedit.h attributes: - name: tm @@ -1798,45 +1796,9 @@ attribute-sets: name: key-ex type: binary - - name: tc-act-police-attrs - attributes: - - - name: tbf - type: binary - struct: tc-police - - - name: rate - type: binary # TODO - - - name: peakrate - type: binary # TODO - - - name: avrate - type: u32 - - - name: result - type: u32 - - - name: tm - type: binary - struct: tcf-t - - - name: pad - type: pad - - - name: rate64 - type: u64 - - - name: peakrate64 - type: u64 - - - name: pktrate64 - type: u64 - - - name: pktburst64 - type: u64 - - - name: tc-act-simple-attrs + name: act-simple-attrs + name-prefix: tca-def- + header: linux/tc_act/tc_defact.h attributes: - name: tm @@ -1852,7 +1814,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-skbedit-attrs + name: act-skbedit-attrs + name-prefix: tca-skbedit- + header: linux/tc_act/tc_skbedit.h attributes: - name: tm @@ -1886,7 +1850,9 @@ attribute-sets: name: queue-mapping-max type: u16 - - name: tc-act-skbmod-attrs + name: act-skbmod-attrs + name-prefix: tca-skbmod- + header: linux/tc_act/tc_skbmod.h attributes: - name: tm @@ -1908,7 +1874,9 
@@ attribute-sets: name: pad type: pad - - name: tc-act-tunnel-key-attrs + name: act-tunnel-key-attrs + name-prefix: tca-tunnel-key- + header: linux/tc_act/tc_tunnel_key.h attributes: - name: tm @@ -1958,7 +1926,9 @@ attribute-sets: name: no-frag type: flag - - name: tc-act-vlan-attrs + name: act-vlan-attrs + name-prefix: tca-vlan- + header: linux/tc_act/tc_vlan.h attributes: - name: tm @@ -1987,7 +1957,8 @@ attribute-sets: name: push-eth-src type: binary - - name: tc-basic-attrs + name: basic-attrs + name-prefix: tca-basic- attributes: - name: classid @@ -1995,16 +1966,16 @@ attribute-sets: - name: ematches type: nest - nested-attributes: tc-ematch-attrs + nested-attributes: ematch-attrs - name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: pcnt type: binary @@ -2013,17 +1984,18 @@ attribute-sets: name: pad type: pad - - name: tc-bpf-attrs + name: bpf-attrs + name-prefix: tca-bpf- attributes: - name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: classid type: u32 @@ -2052,7 +2024,8 @@ attribute-sets: name: id type: u32 - - name: tc-cake-attrs + name: cake-attrs + name-prefix: tca-cake- attributes: - name: pad @@ -2109,7 +2082,8 @@ attribute-sets: name: fwmark type: u32 - - name: tc-cake-stats-attrs + name: cake-stats-attrs + name-prefix: tca-cake-stats- attributes: - name: pad @@ -2142,7 +2116,7 @@ attribute-sets: name: tin-stats type: indexed-array sub-type: nest - nested-attributes: tc-cake-tin-stats-attrs + nested-attributes: cake-tin-stats-attrs - name: deficit type: s32 @@ -2162,7 +2136,8 @@ attribute-sets: name: blue-timer-us type: s32 - - name: tc-cake-tin-stats-attrs + name: cake-tin-stats-attrs + name-prefix: tca-cake-tin-stats- attributes: - name: pad @@ -2240,29 +2215,32 @@ attribute-sets: name: flow-quantum type: u32 - - name: tc-cbs-attrs + name: cbs-attrs + name-prefix: tca-cbs- attributes: - name: parms type: binary struct: tc-cbs-qopt - - name: tc-cgroup-attrs + name: cgroup-attrs + name-prefix: tca-cgroup- attributes: - name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: ematches type: binary - - name: tc-choke-attrs + name: choke-attrs + name-prefix: tca-choke- attributes: - name: parms @@ -2278,7 +2256,8 @@ attribute-sets: name: max-p type: u32 - - name: tc-codel-attrs + name: codel-attrs + name-prefix: tca-codel- attributes: - name: target @@ -2296,13 +2275,16 @@ attribute-sets: name: ce-threshold type: u32 - - name: tc-drr-attrs + name: drr-attrs + name-prefix: tca-drr- attributes: - name: quantum type: u32 - - name: tc-ematch-attrs + name: ematch-attrs + name-prefix: tca-ematch- + attr-max-name: tca-ematch-tree-max attributes: - name: tree-hdr @@ -2312,7 +2294,8 @@ attribute-sets: name: tree-list type: binary - - name: tc-flow-attrs + name: flow-attrs + name-prefix: tca-flow- attributes: - name: keys @@ -2344,7 +2327,7 @@ attribute-sets: - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: ematches type: binary @@ -2352,7 +2335,8 @@ attribute-sets: name: perturb type: u32 - - name: tc-flower-attrs + name: flower-attrs + name-prefix: 
tca-flower- attributes: - name: classid @@ -2364,7 +2348,7 @@ attribute-sets: name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: key-eth-dst type: binary @@ -2443,7 +2427,7 @@ attribute-sets: - name: flags type: u32 - enum: tc-cls-flags + enum: cls-flags enum-as-flags: true - name: key-vlan-id @@ -2548,13 +2532,13 @@ attribute-sets: name: key-flags type: u32 byte-order: big-endian - enum: tc-flower-key-ctrl-flags + enum: flower-key-ctrl-flags enum-as-flags: true - name: key-flags-mask type: u32 byte-order: big-endian - enum: tc-flower-key-ctrl-flags + enum: flower-key-ctrl-flags enum-as-flags: true - name: key-icmpv4-code @@ -2677,11 +2661,11 @@ attribute-sets: - name: key-enc-opts type: nest - nested-attributes: tc-flower-key-enc-opts-attrs + nested-attributes: flower-key-enc-opts-attrs - name: key-enc-opts-mask type: nest - nested-attributes: tc-flower-key-enc-opts-attrs + nested-attributes: flower-key-enc-opts-attrs - name: in-hw-count type: u32 @@ -2728,7 +2712,7 @@ attribute-sets: - name: key-mpls-opts type: nest - nested-attributes: tc-flower-key-mpls-opt-attrs + nested-attributes: flower-key-mpls-opt-attrs - name: key-hash type: u32 @@ -2756,7 +2740,7 @@ attribute-sets: - name: key-cfm type: nest - nested-attributes: tc-flower-key-cfm-attrs + nested-attributes: flower-key-cfm-attrs - name: key-spi type: u32 @@ -2769,35 +2753,37 @@ attribute-sets: name: key-enc-flags type: u32 byte-order: big-endian - enum: tc-flower-key-ctrl-flags + enum: flower-key-ctrl-flags enum-as-flags: true - name: key-enc-flags-mask type: u32 byte-order: big-endian - enum: tc-flower-key-ctrl-flags + enum: flower-key-ctrl-flags enum-as-flags: true - - name: tc-flower-key-enc-opts-attrs + name: flower-key-enc-opts-attrs + name-prefix: tca-flower-key-enc-opts- attributes: - name: geneve type: nest - nested-attributes: tc-flower-key-enc-opt-geneve-attrs + nested-attributes: flower-key-enc-opt-geneve-attrs - name: vxlan type: nest - nested-attributes: tc-flower-key-enc-opt-vxlan-attrs + nested-attributes: flower-key-enc-opt-vxlan-attrs - name: erspan type: nest - nested-attributes: tc-flower-key-enc-opt-erspan-attrs + nested-attributes: flower-key-enc-opt-erspan-attrs - name: gtp type: nest - nested-attributes: tc-flower-key-enc-opt-gtp-attrs + nested-attributes: flower-key-enc-opt-gtp-attrs - - name: tc-flower-key-enc-opt-geneve-attrs + name: flower-key-enc-opt-geneve-attrs + name-prefix: tca-flower-key-enc-opt-geneve- attributes: - name: class @@ -2809,13 +2795,15 @@ attribute-sets: name: data type: binary - - name: tc-flower-key-enc-opt-vxlan-attrs + name: flower-key-enc-opt-vxlan-attrs + name-prefix: tca-flower-key-enc-opt-vxlan- attributes: - name: gbp type: u32 - - name: tc-flower-key-enc-opt-erspan-attrs + name: flower-key-enc-opt-erspan-attrs + name-prefix: tca-flower-key-enc-opt-erspan- attributes: - name: ver @@ -2830,7 +2818,8 @@ attribute-sets: name: hwid type: u8 - - name: tc-flower-key-enc-opt-gtp-attrs + name: flower-key-enc-opt-gtp-attrs + name-prefix: tca-flower-key-enc-opt-gtp- attributes: - name: pdu-type @@ -2839,7 +2828,9 @@ attribute-sets: name: qfi type: u8 - - name: tc-flower-key-mpls-opt-attrs + name: flower-key-mpls-opt-attrs + name-prefix: tca-flower-key-mpls-opt- + attr-max-name: tca-flower-key-mpls-opt-lse-max attributes: - name: lse-depth @@ -2857,7 +2848,8 @@ attribute-sets: name: lse-label type: u32 - - name: tc-flower-key-cfm-attrs + name: flower-key-cfm-attrs + name-prefix: tca-flower-key-cfm- attributes: - name: 
md-level @@ -2866,7 +2858,8 @@ attribute-sets: name: opcode type: u8 - - name: tc-fw-attrs + name: fw-attrs + name-prefix: tca-fw- attributes: - name: classid @@ -2874,7 +2867,7 @@ attribute-sets: - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: indev type: string @@ -2882,12 +2875,13 @@ attribute-sets: name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: mask type: u32 - - name: tc-gred-attrs + name: gred-attrs + name-prefix: tca-gred- attributes: - name: parms @@ -2913,6 +2907,7 @@ attribute-sets: nested-attributes: tca-gred-vq-list-attrs - name: tca-gred-vq-list-attrs + name-prefix: tca-gred-vq- attributes: - name: entry @@ -2921,6 +2916,7 @@ attribute-sets: multi-attr: true - name: tca-gred-vq-entry-attrs + name-prefix: tca-gred-vq- attributes: - name: pad @@ -2959,7 +2955,7 @@ attribute-sets: name: flags type: u32 - - name: tc-hfsc-attrs + name: hfsc-attrs attributes: - name: rsc @@ -2971,7 +2967,8 @@ attribute-sets: name: usc type: binary - - name: tc-hhf-attrs + name: hhf-attrs + name-prefix: tca-hhf- attributes: - name: backlog-limit @@ -2995,7 +2992,8 @@ attribute-sets: name: non-hh-weight type: u32 - - name: tc-htb-attrs + name: htb-attrs + name-prefix: tca-htb- attributes: - name: parms @@ -3027,7 +3025,8 @@ attribute-sets: name: offload type: flag - - name: tc-matchall-attrs + name: matchall-attrs + name-prefix: tca-matchall- attributes: - name: classid @@ -3036,7 +3035,7 @@ attribute-sets: name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - name: flags type: u32 @@ -3048,14 +3047,16 @@ attribute-sets: name: pad type: pad - - name: tc-etf-attrs + name: etf-attrs + name-prefix: tca-etf- attributes: - name: parms type: binary struct: tc-etf-qopt - - name: tc-ets-attrs + name: ets-attrs + name-prefix: tca-ets- attributes: - name: nbands @@ -3066,7 +3067,7 @@ attribute-sets: - name: quanta type: nest - nested-attributes: tc-ets-attrs + nested-attributes: ets-attrs - name: quanta-band type: u32 @@ -3074,13 +3075,14 @@ attribute-sets: - name: priomap type: nest - nested-attributes: tc-ets-attrs + nested-attributes: ets-attrs - name: priomap-band type: u8 multi-attr: true - - name: tc-fq-attrs + name: fq-attrs + name-prefix: tca-fq- attributes: - name: plimit @@ -3151,7 +3153,8 @@ attribute-sets: sub-type: s32 doc: Weights for each band - - name: tc-fq-codel-attrs + name: fq-codel-attrs + name-prefix: tca-fq-codel- attributes: - name: target @@ -3187,7 +3190,8 @@ attribute-sets: name: ce-threshold-mask type: u8 - - name: tc-fq-pie-attrs + name: fq-pie-attrs + name-prefix: tca-fq-pie- attributes: - name: limit @@ -3226,7 +3230,8 @@ attribute-sets: name: dq-rate-estimator type: u32 - - name: tc-netem-attrs + name: netem-attrs + name-prefix: tca-netem- attributes: - name: corr @@ -3247,7 +3252,7 @@ attribute-sets: - name: loss type: nest - nested-attributes: tc-netem-loss-attrs + nested-attributes: netem-loss-attrs - name: rate type: binary @@ -3279,7 +3284,8 @@ attribute-sets: name: prng-seed type: u64 - - name: tc-netem-loss-attrs + name: netem-loss-attrs + name-prefix: netem-loss- attributes: - name: gi @@ -3292,7 +3298,8 @@ attribute-sets: doc: Gilbert Elliot models struct: tc-netem-gemodel - - name: tc-pie-attrs + name: pie-attrs + name-prefix: tca-pie- attributes: - name: target @@ -3319,7 +3326,8 @@ attribute-sets: name: dq-rate-estimator type: u32 - - name: tc-police-attrs + name: police-attrs + 
name-prefix: tca-police- attributes: - name: tbf @@ -3327,10 +3335,10 @@ attribute-sets: struct: tc-police - name: rate - type: binary + type: binary # TODO - name: peakrate - type: binary + type: binary # TODO - name: avrate type: u32 @@ -3357,7 +3365,8 @@ attribute-sets: name: pktburst64 type: u64 - - name: tc-qfq-attrs + name: qfq-attrs + name-prefix: tca-qfq- attributes: - name: weight @@ -3366,7 +3375,8 @@ attribute-sets: name: lmax type: u32 - - name: tc-red-attrs + name: red-attrs + name-prefix: tca-red- attributes: - name: parms @@ -3388,7 +3398,8 @@ attribute-sets: name: mark-block type: u32 - - name: tc-route-attrs + name: route-attrs + name-prefix: tca-route4- attributes: - name: classid @@ -3405,14 +3416,15 @@ attribute-sets: - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: act type: indexed-array sub-type: nest - nested-attributes: tc-act-attrs + nested-attributes: act-attrs - - name: tc-taprio-attrs + name: taprio-attrs + name-prefix: tca-taprio-attr- attributes: - name: priomap @@ -3421,14 +3433,14 @@ attribute-sets: - name: sched-entry-list type: nest - nested-attributes: tc-taprio-sched-entry-list + nested-attributes: taprio-sched-entry-list - name: sched-base-time type: s64 - name: sched-single-entry type: nest - nested-attributes: tc-taprio-sched-entry + nested-attributes: taprio-sched-entry - name: sched-clockid type: s32 @@ -3453,17 +3465,19 @@ attribute-sets: - name: tc-entry type: nest - nested-attributes: tc-taprio-tc-entry-attrs + nested-attributes: taprio-tc-entry-attrs - - name: tc-taprio-sched-entry-list + name: taprio-sched-entry-list + name-prefix: tca-taprio-sched- attributes: - name: entry type: nest - nested-attributes: tc-taprio-sched-entry + nested-attributes: taprio-sched-entry multi-attr: true - - name: tc-taprio-sched-entry + name: taprio-sched-entry + name-prefix: tca-taprio-sched-entry- attributes: - name: index @@ -3478,7 +3492,8 @@ attribute-sets: name: interval type: u32 - - name: tc-taprio-tc-entry-attrs + name: taprio-tc-entry-attrs + name-prefix: tca-taprio-tc-entry- attributes: - name: index @@ -3490,7 +3505,8 @@ attribute-sets: name: fp type: u32 - - name: tc-tbf-attrs + name: tbf-attrs + name-prefix: tca-tbf- attributes: - name: parms @@ -3518,7 +3534,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-sample-attrs + name: act-sample-attrs + name-prefix: tca-sample- + header: linux/tc_act/tc_sample.h attributes: - name: tm @@ -3527,7 +3545,7 @@ attribute-sets: - name: parms type: binary - struct: tc-gen + struct: tc-gact - name: rate type: u32 @@ -3541,7 +3559,9 @@ attribute-sets: name: pad type: pad - - name: tc-act-gact-attrs + name: act-gact-attrs + name-prefix: tca-gact- + header: linux/tc_act/tc_gact.h attributes: - name: tm @@ -3550,7 +3570,7 @@ attribute-sets: - name: parms type: binary - struct: tc-gen + struct: tc-gact - name: prob type: binary @@ -3560,6 +3580,7 @@ attribute-sets: type: pad - name: tca-stab-attrs + name-prefix: tca-stab- attributes: - name: base @@ -3570,6 +3591,8 @@ attribute-sets: type: binary - name: tca-stats-attrs + name-prefix: tca-stats- + header: linux/gen_stats.h attributes: - name: basic @@ -3603,7 +3626,8 @@ attribute-sets: name: pkt64 type: u64 - - name: tc-u32-attrs + name: u32-attrs + name-prefix: tca-u32- attributes: - name: classid @@ -3624,12 +3648,12 @@ attribute-sets: - name: police type: nest - nested-attributes: tc-police-attrs + nested-attributes: police-attrs - name: act type: indexed-array sub-type: nest - nested-attributes: 
tc-act-attrs + nested-attributes: act-attrs - name: indev type: string @@ -3650,78 +3674,78 @@ attribute-sets: sub-messages: - - name: tc-options-msg + name: options-msg formats: - value: basic - attribute-set: tc-basic-attrs + attribute-set: basic-attrs - value: bpf - attribute-set: tc-bpf-attrs + attribute-set: bpf-attrs - value: bfifo fixed-header: tc-fifo-qopt - value: cake - attribute-set: tc-cake-attrs + attribute-set: cake-attrs - value: cbs - attribute-set: tc-cbs-attrs + attribute-set: cbs-attrs - value: cgroup - attribute-set: tc-cgroup-attrs + attribute-set: cgroup-attrs - value: choke - attribute-set: tc-choke-attrs + attribute-set: choke-attrs - value: clsact # no content - value: codel - attribute-set: tc-codel-attrs + attribute-set: codel-attrs - value: drr - attribute-set: tc-drr-attrs + attribute-set: drr-attrs - value: etf - attribute-set: tc-etf-attrs + attribute-set: etf-attrs - value: ets - attribute-set: tc-ets-attrs + attribute-set: ets-attrs - value: flow - attribute-set: tc-flow-attrs + attribute-set: flow-attrs - value: flower - attribute-set: tc-flower-attrs + attribute-set: flower-attrs - value: fq - attribute-set: tc-fq-attrs + attribute-set: fq-attrs - value: fq_codel - attribute-set: tc-fq-codel-attrs + attribute-set: fq-codel-attrs - value: fq_pie - attribute-set: tc-fq-pie-attrs + attribute-set: fq-pie-attrs - value: fw - attribute-set: tc-fw-attrs + attribute-set: fw-attrs - value: gred - attribute-set: tc-gred-attrs + attribute-set: gred-attrs - value: hfsc fixed-header: tc-hfsc-qopt - value: hhf - attribute-set: tc-hhf-attrs + attribute-set: hhf-attrs - value: htb - attribute-set: tc-htb-attrs + attribute-set: htb-attrs - value: ingress # no content - value: matchall - attribute-set: tc-matchall-attrs + attribute-set: matchall-attrs - value: mq # no content - @@ -3733,7 +3757,7 @@ sub-messages: - value: netem fixed-header: tc-netem-qopt - attribute-set: tc-netem-attrs + attribute-set: netem-attrs - value: pfifo fixed-header: tc-fifo-qopt @@ -3745,7 +3769,7 @@ sub-messages: fixed-header: tc-fifo-qopt - value: pie - attribute-set: tc-pie-attrs + attribute-set: pie-attrs - value: plug fixed-header: tc-plug-qopt @@ -3754,13 +3778,13 @@ sub-messages: fixed-header: tc-prio-qopt - value: qfq - attribute-set: tc-qfq-attrs + attribute-set: qfq-attrs - value: red - attribute-set: tc-red-attrs + attribute-set: red-attrs - value: route - attribute-set: tc-route-attrs + attribute-set: route-attrs - value: sfb fixed-header: tc-sfb-qopt @@ -3769,79 +3793,79 @@ sub-messages: fixed-header: tc-sfq-qopt-v1 - value: taprio - attribute-set: tc-taprio-attrs + attribute-set: taprio-attrs - value: tbf - attribute-set: tc-tbf-attrs + attribute-set: tbf-attrs - value: u32 - attribute-set: tc-u32-attrs + attribute-set: u32-attrs - - name: tc-act-options-msg + name: act-options-msg formats: - value: bpf - attribute-set: tc-act-bpf-attrs + attribute-set: act-bpf-attrs - value: connmark - attribute-set: tc-act-connmark-attrs + attribute-set: act-connmark-attrs - value: csum - attribute-set: tc-act-csum-attrs + attribute-set: act-csum-attrs - value: ct - attribute-set: tc-act-ct-attrs + attribute-set: act-ct-attrs - value: ctinfo - attribute-set: tc-act-ctinfo-attrs + attribute-set: act-ctinfo-attrs - value: gact - attribute-set: tc-act-gact-attrs + attribute-set: act-gact-attrs - value: gate - attribute-set: tc-act-gate-attrs + attribute-set: act-gate-attrs - value: ife - attribute-set: tc-act-ife-attrs + attribute-set: act-ife-attrs - value: mirred - attribute-set: 
tc-act-mirred-attrs + attribute-set: act-mirred-attrs - value: mpls - attribute-set: tc-act-mpls-attrs + attribute-set: act-mpls-attrs - value: nat - attribute-set: tc-act-nat-attrs + attribute-set: act-nat-attrs - value: pedit - attribute-set: tc-act-pedit-attrs + attribute-set: act-pedit-attrs - value: police - attribute-set: tc-act-police-attrs + attribute-set: police-attrs - value: sample - attribute-set: tc-act-sample-attrs + attribute-set: act-sample-attrs - value: simple - attribute-set: tc-act-simple-attrs + attribute-set: act-simple-attrs - value: skbedit - attribute-set: tc-act-skbedit-attrs + attribute-set: act-skbedit-attrs - value: skbmod - attribute-set: tc-act-skbmod-attrs + attribute-set: act-skbmod-attrs - value: tunnel_key - attribute-set: tc-act-tunnel-key-attrs + attribute-set: act-tunnel-key-attrs - value: vlan - attribute-set: tc-act-vlan-attrs + attribute-set: act-vlan-attrs - name: tca-stats-app-msg formats: - value: cake - attribute-set: tc-cake-stats-attrs + attribute-set: cake-stats-attrs - value: choke fixed-header: tc-choke-xstats @@ -3875,11 +3899,12 @@ sub-messages: operations: enum-model: directional + name-prefix: rtm- list: - name: newqdisc doc: Create new tc qdisc. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3894,7 +3919,7 @@ operations: - name: delqdisc doc: Delete existing tc qdisc. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3902,9 +3927,9 @@ operations: - name: getqdisc doc: Get / dump tc qdisc information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg - do: + do: &getqdisc-do request: value: 38 attributes: @@ -3923,10 +3948,11 @@ operations: - chain - ingress-block - egress-block + dump: *getqdisc-do - name: newtclass doc: Get / dump tc traffic class information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3935,7 +3961,7 @@ operations: - name: deltclass doc: Get / dump tc traffic class information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3943,7 +3969,7 @@ operations: - name: gettclass doc: Get / dump tc traffic class information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3954,7 +3980,7 @@ operations: - name: newtfilter doc: Get / dump tc filter information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3963,7 +3989,7 @@ operations: - name: deltfilter doc: Get / dump tc filter information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3974,7 +4000,7 @@ operations: - name: gettfilter doc: Get / dump tc filter information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -3997,7 +4023,7 @@ operations: - name: newchain doc: Get / dump tc chain information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -4006,7 +4032,7 @@ operations: - name: delchain doc: Get / dump tc chain information. - attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: @@ -4016,7 +4042,7 @@ operations: - name: getchain doc: Get / dump tc chain information. 
- attribute-set: tc-attrs + attribute-set: attrs fixed-header: tcmsg do: request: diff --git a/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst new file mode 100644 index 000000000000..e3dfd083fa52 --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst @@ -0,0 +1,137 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================================================================== +Linux kernel driver for Huawei Ethernet Device Driver (hinic3) family +===================================================================== + +Overview +======== + +The hinic3 is a network interface card (NIC) for Data Center. It supports +a range of link-speed devices (10GE, 25GE, 100GE, etc.). The hinic3 +devices can have multiple physical forms: LOM (Lan on Motherboard) NIC, +PCIe standard NIC, OCP (Open Compute Project) NIC, etc. + +The hinic3 driver supports the following features: +- IPv4/IPv6 TCP/UDP checksum offload +- TSO (TCP Segmentation Offload), LRO (Large Receive Offload) +- RSS (Receive Side Scaling) +- MSI-X interrupt aggregation configuration and interrupt adaptation. +- SR-IOV (Single Root I/O Virtualization). + +Content +======= + +- Supported PCI vendor ID/device IDs +- Source Code Structure of Hinic3 Driver +- Management Interface + +Supported PCI vendor ID/device IDs +================================== + +19e5:0222 - hinic3 PF/PPF +19e5:375F - hinic3 VF + +Prime Physical Function (PPF) is responsible for the management of the +whole NIC card. For example, clock synchronization between the NIC and +the host. Any PF may serve as a PPF. The PPF is selected dynamically. + +Source Code Structure of Hinic3 Driver +====================================== + +======================== ================================================ +hinic3_pci_id_tbl.h Supported device IDs +hinic3_hw_intf.h Interface between HW and driver +hinic3_queue_common.[ch] Common structures and methods for NIC queues +hinic3_common.[ch] Encapsulation of memory operations in Linux +hinic3_csr.h Register definitions in the BAR +hinic3_hwif.[ch] Interface for BAR +hinic3_eqs.[ch] Interface for AEQs and CEQs +hinic3_mbox.[ch] Interface for mailbox +hinic3_mgmt.[ch] Management interface based on mailbox and AEQ +hinic3_wq.[ch] Work queue data structures and interface +hinic3_cmdq.[ch] Command queue is used to post command to HW +hinic3_hwdev.[ch] HW structures and methods abstractions +hinic3_lld.[ch] Auxiliary driver adaptation layer +hinic3_hw_comm.[ch] Interface for common HW operations +hinic3_mgmt_interface.h Interface between firmware and driver +hinic3_hw_cfg.[ch] Interface for HW configuration +hinic3_irq.c Interrupt request +hinic3_netdev_ops.c Operations registered to Linux kernel stack +hinic3_nic_dev.h NIC structures and methods abstractions +hinic3_main.c Main Linux kernel driver +hinic3_nic_cfg.[ch] NIC service configuration +hinic3_nic_io.[ch] Management plane interface for TX and RX +hinic3_rss.[ch] Interface for Receive Side Scaling (RSS) +hinic3_rx.[ch] Interface for transmit +hinic3_tx.[ch] Interface for receive +hinic3_ethtool.c Interface for ethtool operations (ops) +hinic3_filter.c Interface for MAC address +======================== ================================================ + +Management Interface +==================== + +Asynchronous Event Queue (AEQ) +------------------------------ + +AEQ receives high priority events from the HW over a descriptor queue. 
+Every descriptor is a fixed size of 64 bytes. AEQ can receive solicited or +unsolicited events. Every device, VF or PF, can have up to 4 AEQs. +Every AEQ is associated to a dedicated IRQ. AEQ can receive multiple types +of events, but in practice the hinic3 driver ignores all events except for +2 mailbox related events. + +Mailbox +------- + +Mailbox is a communication mechanism between the hinic3 driver and the HW. +Each device has an independent mailbox. Driver can use the mailbox to send +requests to management. Driver receives mailbox messages, such as responses +to requests, over the AEQ (using event HINIC3_AEQ_FOR_MBOX). Due to the +limited size of mailbox data register, mailbox messages are sent +segment-by-segment. + +Every device can use its mailbox to post request to firmware. The mailbox +can also be used to post requests and responses between the PF and its VFs. + +Completion Event Queue (CEQ) +---------------------------- + +The implementation of CEQ is the same as AEQ. It receives completion events +from HW over a fixed size descriptor of 32 bits. Every device can have up +to 32 CEQs. Every CEQ has a dedicated IRQ. CEQ only receives solicited +events that are responses to requests from the driver. CEQ can receive +multiple types of events, but in practice the hinic3 driver ignores all +events except for HINIC3_CMDQ that represents completion of previously +posted commands on a cmdq. + +Command Queue (cmdq) +-------------------- + +Every cmdq has a dedicated work queue on which commands are posted. +Commands on the work queue are fixed size descriptor of size 64 bytes. +Completion of a command will be indicated using ctrl bits in the +descriptor that carried the command. Notification of command completions +will also be provided via event on CEQ. Every device has 4 command queues +that are initialized as a set (called cmdqs), each with its own type. +Hinic3 driver only uses type HINIC3_CMDQ_SYNC. + +Work Queues(WQ) +--------------- + +Work queues are logical arrays of fixed size WQEs. The array may be spread +over multiple non-contiguous pages using indirection table. Work queues are +used by I/O queues and command queues. + +Global function ID +------------------ + +Every function, PF or VF, has a unique ordinal identification within the device. +Many management commands (mbox or cmdq) contain this ID so HW can apply the +command effect to the right function. + +PF is allowed to post management commands to a subordinate VF by specifying the +VFs ID. A VF must provide its own ID. Anti-spoofing in the HW will cause +command from a VF to fail if it contains the wrong ID. 
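The work-queue paragraph above is compact, so here is a small standalone C sketch of the idea it describes: a logical array of fixed-size WQEs spread over non-contiguous pages and addressed through an indirection (page pointer) table. This is not code from the hinic3 driver; the names (toy_wq, toy_wq_get_wqe, PAGE_SIZE_BYTES) are hypothetical and chosen purely for illustration, and error unwinding is omitted for brevity:

    /*
     * Conceptual sketch only (not hinic3 driver code): a work queue as a
     * logical array of fixed-size WQEs backed by non-contiguous pages,
     * located through an indirection table of per-page pointers.
     */
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE_BYTES 4096u

    struct toy_wq {
            void   **page_tbl;   /* indirection table: one pointer per page */
            uint32_t wqe_size;   /* fixed WQE size; assumed to divide the page size */
            uint32_t depth;      /* number of WQEs in the logical array */
    };

    static int toy_wq_alloc(struct toy_wq *wq, uint32_t wqe_size, uint32_t depth)
    {
            uint32_t wqes_per_page = PAGE_SIZE_BYTES / wqe_size;
            uint32_t npages = (depth + wqes_per_page - 1) / wqes_per_page;

            wq->page_tbl = calloc(npages, sizeof(*wq->page_tbl));
            if (!wq->page_tbl)
                    return -1;

            for (uint32_t i = 0; i < npages; i++) {
                    /* pages need not be contiguous with one another */
                    wq->page_tbl[i] = aligned_alloc(PAGE_SIZE_BYTES, PAGE_SIZE_BYTES);
                    if (!wq->page_tbl[i])
                            return -1;      /* cleanup omitted in this sketch */
            }

            wq->wqe_size = wqe_size;
            wq->depth = depth;
            return 0;
    }

    /* Translate a logical WQE index into a pointer via the indirection table. */
    static void *toy_wq_get_wqe(const struct toy_wq *wq, uint32_t idx)
    {
            uint32_t wqes_per_page = PAGE_SIZE_BYTES / wq->wqe_size;
            uint32_t page = idx / wqes_per_page;
            uint32_t off  = (idx % wqes_per_page) * wq->wqe_size;

            return (uint8_t *)wq->page_tbl[page] + off;
    }

Producers and consumers can then index WQEs sequentially while the backing memory stays page-granular, which is the property the indirection table buys in the description above.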
+ diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index f9ed93c1da35..139b4c75a191 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -28,6 +28,7 @@ Contents: freescale/gianfar google/gve huawei/hinic + huawei/hinic3 intel/e100 intel/e1000 intel/e1000e diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst index 7f24c09f2694..122204da0fff 100644 --- a/Documentation/networking/xfrm_device.rst +++ b/Documentation/networking/xfrm_device.rst @@ -65,9 +65,13 @@ Callbacks to implement /* from include/linux/netdevice.h */ struct xfrmdev_ops { /* Crypto and Packet offload callbacks */ - int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); - void (*xdo_dev_state_delete) (struct xfrm_state *x); - void (*xdo_dev_state_free) (struct xfrm_state *x); + int (*xdo_dev_state_add)(struct net_device *dev, + struct xfrm_state *x, + struct netlink_ext_ack *extack); + void (*xdo_dev_state_delete)(struct net_device *dev, + struct xfrm_state *x); + void (*xdo_dev_state_free)(struct net_device *dev, + struct xfrm_state *x); bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); diff --git a/MAINTAINERS b/MAINTAINERS index c8e91820b527..b0a3860dd44a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -646,6 +646,13 @@ F: drivers/iio/accel/adxl380.h F: drivers/iio/accel/adxl380_i2c.c F: drivers/iio/accel/adxl380_spi.c +AEONSEMI PHY DRIVER +M: Christian Marangi <ansuelsmth@gmail.com> +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/aeonsemi,as21xxx.yaml +F: drivers/net/phy/as21xxx.c + AF8133J THREE-AXIS MAGNETOMETER DRIVER M: OndÅ™ej Jirman <megi@xff.cz> S: Maintained @@ -5267,6 +5274,7 @@ F: include/uapi/linux/can/isotp.h F: include/uapi/linux/can/raw.h F: net/can/ F: net/sched/em_canid.c +F: tools/testing/selftests/net/can/ CAN-J1939 NETWORK LAYER M: Robin van der Gracht <robin@protonic.nl> @@ -10139,6 +10147,13 @@ F: drivers/gpio/gpio-regmap.c F: include/linux/gpio/regmap.h K: (devm_)?gpio_regmap_(un)?register +GPIO SLOPPY LOGIC ANALYZER +M: Wolfram Sang <wsa+renesas@sang-engineering.com> +S: Supported +F: Documentation/dev-tools/gpio-sloppy-logic-analyzer.rst +F: drivers/gpio/gpio-sloppy-logic-analyzer.c +F: tools/gpio/gpio-sloppy-logic-analyzer.sh + GPIO SUBSYSTEM M: Linus Walleij <linus.walleij@linaro.org> M: Bartosz Golaszewski <brgl@bgdev.pl> @@ -10956,6 +10971,13 @@ S: Maintained F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst F: drivers/net/ethernet/huawei/hinic/ +HUAWEI 3RD GEN ETHERNET DRIVER +M: Fan Gong <gongfan1@huawei.com> +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst +F: drivers/net/ethernet/huawei/hinic3/ + HUAWEI MATEBOOK E GO EMBEDDED CONTROLLER DRIVER M: Pengyu Luo <mitltlatltl@gmail.com> S: Maintained @@ -15034,6 +15056,7 @@ M: Qingfang Deng <dqfext@gmail.com> M: SkyLake Huang <SkyLake.Huang@mediatek.com> L: netdev@vger.kernel.org S: Maintained +F: drivers/net/phy/mediatek/mtk-2p5ge.c F: drivers/net/phy/mediatek/mtk-ge-soc.c F: drivers/net/phy/mediatek/mtk-phy-lib.c F: drivers/net/phy/mediatek/mtk-ge.c @@ -15534,6 +15557,18 @@ S: Maintained F: include/linux/execmem.h F: mm/execmem.c +MEMORY MANAGEMENT - GUP (GET USER PAGES) +M: Andrew Morton 
<akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +R: Jason Gunthorpe <jgg@nvidia.com> +R: John Hubbard <jhubbard@nvidia.com> +R: Peter Xu <peterx@redhat.com> +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: mm/gup.c + MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION M: Andrew Morton <akpm@linux-foundation.org> M: Mike Rapoport <rppt@kernel.org> @@ -17042,6 +17077,7 @@ X: net/ceph/ X: net/mac80211/ X: net/rfkill/ X: net/wireless/ +X: tools/testing/selftests/net/can/ NETWORKING [IPSEC] M: Steffen Klassert <steffen.klassert@secunet.com> @@ -18436,7 +18472,7 @@ F: include/uapi/linux/ppdev.h PARAVIRT_OPS INTERFACE M: Juergen Gross <jgross@suse.com> R: Ajay Kaher <ajay.kaher@broadcom.com> -R: Alexey Makhalov <alexey.amakhalov@broadcom.com> +R: Alexey Makhalov <alexey.makhalov@broadcom.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: virtualization@lists.linux.dev L: x86@kernel.org @@ -20598,8 +20634,8 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c RENESAS ETHERNET AVB DRIVER -M: Paul Barker <paul.barker.ct@bp.renesas.com> M: Niklas Söderlund <niklas.soderlund@ragnatech.se> +R: Paul Barker <paul@pbarker.dev> L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org S: Maintained @@ -25929,7 +25965,7 @@ F: drivers/misc/vmw_balloon.c VMWARE HYPERVISOR INTERFACE M: Ajay Kaher <ajay.kaher@broadcom.com> -M: Alexey Makhalov <alexey.amakhalov@broadcom.com> +M: Alexey Makhalov <alexey.makhalov@broadcom.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: virtualization@lists.linux.dev L: x86@kernel.org @@ -25957,7 +25993,7 @@ F: drivers/scsi/vmw_pvscsi.h VMWARE VIRTUAL PTP CLOCK DRIVER M: Nick Shi <nick.shi@broadcom.com> R: Ajay Kaher <ajay.kaher@broadcom.com> -R: Alexey Makhalov <alexey.amakhalov@broadcom.com> +R: Alexey Makhalov <alexey.makhalov@broadcom.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: netdev@vger.kernel.org S: Supported @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 3df5f2dd4c0f..8f1f18adcdb5 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -150,6 +150,8 @@ #define SO_RCVPRIORITY 82 +#define SO_PASSRIGHTS 83 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h index a5b63c84f854..e5d21e836d99 100644 --- a/arch/loongarch/include/asm/ptrace.h +++ b/arch/loongarch/include/asm/ptrace.h @@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v /* Query offset/name of register from its name/offset */ extern int regs_query_register_offset(const char *name); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) /** * regs_get_register() - get register value from its offset diff --git a/arch/loongarch/include/asm/uprobes.h b/arch/loongarch/include/asm/uprobes.h index 99a0d198927f..025fc3f0a102 100644 --- a/arch/loongarch/include/asm/uprobes.h +++ b/arch/loongarch/include/asm/uprobes.h @@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t; #define UPROBE_XOLBP_INSN 
__emit_break(BRK_UPROBE_XOLBP) struct arch_uprobe { - unsigned long resume_era; u32 insn[2]; u32 ixol[2]; bool simulate; diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S index 4f0912141781..733a7665e434 100644 --- a/arch/loongarch/kernel/genex.S +++ b/arch/loongarch/kernel/genex.S @@ -16,6 +16,7 @@ #include <asm/stackframe.h> #include <asm/thread_info.h> + .section .cpuidle.text, "ax" .align 5 SYM_FUNC_START(__arch_cpu_idle) /* start of idle interrupt region */ @@ -31,14 +32,16 @@ SYM_FUNC_START(__arch_cpu_idle) */ idle 0 /* end of idle interrupt region */ -1: jr ra +idle_exit: + jr ra SYM_FUNC_END(__arch_cpu_idle) + .previous SYM_CODE_START(handle_vint) UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL - la_abs t1, 1b + la_abs t1, idle_exit LONG_L t0, sp, PT_ERA /* 3 instructions idle interrupt region */ ori t0, t0, 0b1100 diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c index ec5b28e570c9..4c476904227f 100644 --- a/arch/loongarch/kernel/kfpu.c +++ b/arch/loongarch/kernel/kfpu.c @@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN; static DEFINE_PER_CPU(bool, in_kernel_fpu); static DEFINE_PER_CPU(unsigned int, euen_current); +static inline void fpregs_lock(void) +{ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); + else + local_bh_disable(); +} + +static inline void fpregs_unlock(void) +{ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); + else + local_bh_enable(); +} + void kernel_fpu_begin(void) { unsigned int *euen_curr; - preempt_disable(); + if (!irqs_disabled()) + fpregs_lock(); WARN_ON(this_cpu_read(in_kernel_fpu)); @@ -73,7 +90,8 @@ void kernel_fpu_end(void) this_cpu_write(in_kernel_fpu, false); - preempt_enable(); + if (!irqs_disabled()) + fpregs_unlock(); } EXPORT_SYMBOL_GPL(kernel_fpu_end); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index e2d3bfeb6366..bc75a3a69fc8 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -111,7 +111,7 @@ static unsigned long __init get_loops_per_jiffy(void) return lpj; } -static long init_offset __nosavedata; +static long init_offset; void save_counter(void) { diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c index 87abc7137b73..6022eb0f71db 100644 --- a/arch/loongarch/kernel/uprobes.c +++ b/arch/loongarch/kernel/uprobes.c @@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; instruction_pointer_set(regs, utask->xol_vaddr); - user_enable_single_step(current); return 0; } @@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current->thread.trap_nr = utask->autask.saved_trap_nr; - - if (auprobe->simulate) - instruction_pointer_set(regs, auprobe->resume_era); - else - instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); - - user_disable_single_step(current); + instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); return 0; } @@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) current->thread.trap_nr = utask->autask.saved_trap_nr; instruction_pointer_set(regs, utask->vaddr); - user_disable_single_step(current); } bool arch_uprobe_xol_was_trapped(struct task_struct *t) @@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) insn.word = auprobe->insn[0]; 
arch_simulate_insn(insn, regs); - auprobe->resume_era = regs->csr_era; return true; } diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c index 1e0590542f98..e7b7346592cb 100644 --- a/arch/loongarch/power/hibernate.c +++ b/arch/loongarch/power/hibernate.c @@ -2,6 +2,7 @@ #include <asm/fpu.h> #include <asm/loongson.h> #include <asm/sections.h> +#include <asm/time.h> #include <asm/tlbflush.h> #include <linux/suspend.h> @@ -14,6 +15,7 @@ struct pt_regs saved_regs; void save_processor_state(void) { + save_counter(); saved_crmd = csr_read32(LOONGARCH_CSR_CRMD); saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); saved_euen = csr_read32(LOONGARCH_CSR_EUEN); @@ -26,6 +28,7 @@ void save_processor_state(void) void restore_processor_state(void) { + sync_counter(); csr_write32(saved_crmd, LOONGARCH_CSR_CRMD); csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); csr_write32(saved_euen, LOONGARCH_CSR_EUEN); diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c index 734dab657fe3..5b70dfdab368 100644 --- a/arch/m68k/coldfire/m5272.c +++ b/arch/m68k/coldfire/m5272.c @@ -119,7 +119,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = { static int __init init_BSP(void) { m5272_uarts_init(); - fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status); + fixed_phy_add(0, &nettel_fixed_phy_status); clkdev_add_table(m5272_clk_lookup, ARRAY_SIZE(m5272_clk_lookup)); return 0; } diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 247be207f293..de426a474b5b 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -282,7 +282,7 @@ static int __init bcm47xx_register_bus_complete(void) bcm47xx_leds_register(); bcm47xx_workarounds(); - fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status); + fixed_phy_add(0, &bcm47xx_fixed_phy_status); return 0; } device_initcall(bcm47xx_register_bus_complete); diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 22fa8f19924a..31ac655b7837 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -161,6 +161,8 @@ #define SO_RCVPRIORITY 82 +#define SO_PASSRIGHTS 83 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 96831c988606..1f2d5b7a7f5d 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -142,6 +142,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 0x4050 +#define SO_PASSRIGHTS 0x4051 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 5b464a568664..adcba7329386 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -143,6 +143,8 @@ #define SO_RCVPRIORITY 0x005b +#define SO_PASSRIGHTS 0x005c + #if !defined(__KERNEL__) diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c index b0c1a7a57497..36beaac713c1 100644 --- a/arch/x86/coco/sev/core.c +++ b/arch/x86/coco/sev/core.c @@ -959,6 +959,102 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end) set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE); } +static int vmgexit_ap_control(u64 event, struct sev_es_save_area *vmsa, u32 apic_id) +{ + bool create = event != SVM_VMGEXIT_AP_DESTROY; + struct ghcb_state state; + unsigned long flags; + struct ghcb *ghcb; + int ret = 0; + + local_irq_save(flags); + + ghcb = __sev_get_ghcb(&state); + + 
vc_ghcb_invalidate(ghcb); + + if (create) + ghcb_set_rax(ghcb, vmsa->sev_features); + + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION); + ghcb_set_sw_exit_info_1(ghcb, + ((u64)apic_id << 32) | + ((u64)snp_vmpl << 16) | + event); + ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa)); + + sev_es_wr_ghcb_msr(__pa(ghcb)); + VMGEXIT(); + + if (!ghcb_sw_exit_info_1_is_valid(ghcb) || + lower_32_bits(ghcb->save.sw_exit_info_1)) { + pr_err("SNP AP %s error\n", (create ? "CREATE" : "DESTROY")); + ret = -EINVAL; + } + + __sev_put_ghcb(&state); + + local_irq_restore(flags); + + return ret; +} + +static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa) +{ + int ret; + + if (snp_vmpl) { + struct svsm_call call = {}; + unsigned long flags; + + local_irq_save(flags); + + call.caa = this_cpu_read(svsm_caa); + call.rcx = __pa(va); + + if (make_vmsa) { + /* Protocol 0, Call ID 2 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU); + call.rdx = __pa(caa); + call.r8 = apic_id; + } else { + /* Protocol 0, Call ID 3 */ + call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU); + } + + ret = svsm_perform_call_protocol(&call); + + local_irq_restore(flags); + } else { + /* + * If the kernel runs at VMPL0, it can change the VMSA + * bit for a page using the RMPADJUST instruction. + * However, for the instruction to succeed it must + * target the permissions of a lesser privileged (higher + * numbered) VMPL level, so use VMPL1. + */ + u64 attrs = 1; + + if (make_vmsa) + attrs |= RMPADJUST_VMSA_PAGE_BIT; + + ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs); + } + + return ret; +} + +static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id) +{ + int err; + + err = snp_set_vmsa(vmsa, NULL, apic_id, false); + if (err) + pr_err("clear VMSA page failed (%u), leaking page\n", err); + else + free_page((unsigned long)vmsa); +} + static void set_pte_enc(pte_t *kpte, int level, void *va) { struct pte_enc_desc d = { @@ -1005,7 +1101,8 @@ static void unshare_all_memory(void) data = per_cpu(runtime_data, cpu); ghcb = (unsigned long)&data->ghcb_page; - if (addr <= ghcb && ghcb <= addr + size) { + /* Handle the case of a huge page containing the GHCB page */ + if (addr <= ghcb && ghcb < addr + size) { skipped_addr = true; break; } @@ -1055,11 +1152,70 @@ void snp_kexec_begin(void) pr_warn("Failed to stop shared<->private conversions\n"); } +/* + * Shutdown all APs except the one handling kexec/kdump and clearing + * the VMSA tag on AP's VMSA pages as they are not being used as + * VMSA page anymore. + */ +static void shutdown_all_aps(void) +{ + struct sev_es_save_area *vmsa; + int apic_id, this_cpu, cpu; + + this_cpu = get_cpu(); + + /* + * APs are already in HLT loop when enc_kexec_finish() callback + * is invoked. + */ + for_each_present_cpu(cpu) { + vmsa = per_cpu(sev_vmsa, cpu); + + /* + * The BSP or offlined APs do not have guest allocated VMSA + * and there is no need to clear the VMSA tag for this page. + */ + if (!vmsa) + continue; + + /* + * Cannot clear the VMSA tag for the currently running vCPU. + */ + if (this_cpu == cpu) { + unsigned long pa; + struct page *p; + + pa = __pa(vmsa); + /* + * Mark the VMSA page of the running vCPU as offline + * so that is excluded and not touched by makedumpfile + * while generating vmcore during kdump. 
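vmgexit_ap_control() above folds the target APIC ID, the caller's VMPL and the AP-creation event code into the single 64-bit sw_exit_info_1 field (APIC ID in bits 63:32, VMPL in 31:16, event in 15:0, exactly as the ghcb_set_sw_exit_info_1() call writes it). That packing can be sanity-checked in isolation with a short host-side program; the numeric inputs below are made up for illustration:

/* Stand-alone check of the sw_exit_info_1 layout used above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_ap_control(uint32_t apic_id, uint16_t vmpl, uint16_t event)
{
        return ((uint64_t)apic_id << 32) | ((uint64_t)vmpl << 16) | event;
}

int main(void)
{
        /* Example values only; the kernel takes them from cpuid_to_apicid[] etc. */
        uint64_t v = pack_ap_control(0x2a, 1, 1);

        printf("sw_exit_info_1 = 0x%016llx\n", (unsigned long long)v);
        printf("apic_id=%u vmpl=%u event=%u\n",
               (unsigned)(v >> 32), (unsigned)((v >> 16) & 0xffff),
               (unsigned)(v & 0xffff));
        return 0;
}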
+ */ + p = pfn_to_online_page(pa >> PAGE_SHIFT); + if (p) + __SetPageOffline(p); + continue; + } + + apic_id = cpuid_to_apicid[cpu]; + + /* + * Issue AP destroy to ensure AP gets kicked out of guest mode + * to allow using RMPADJUST to remove the VMSA tag on it's + * VMSA page. + */ + vmgexit_ap_control(SVM_VMGEXIT_AP_DESTROY, vmsa, apic_id); + snp_cleanup_vmsa(vmsa, apic_id); + } + + put_cpu(); +} + void snp_kexec_finish(void) { struct sev_es_runtime_data *data; + unsigned long size, addr; unsigned int level, cpu; - unsigned long size; struct ghcb *ghcb; pte_t *pte; @@ -1069,6 +1225,8 @@ void snp_kexec_finish(void) if (!IS_ENABLED(CONFIG_KEXEC_CORE)) return; + shutdown_all_aps(); + unshare_all_memory(); /* @@ -1085,54 +1243,11 @@ void snp_kexec_finish(void) ghcb = &data->ghcb_page; pte = lookup_address((unsigned long)ghcb, &level); size = page_level_size(level); - set_pte_enc(pte, level, (void *)ghcb); - snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE)); - } -} - -static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa) -{ - int ret; - - if (snp_vmpl) { - struct svsm_call call = {}; - unsigned long flags; - - local_irq_save(flags); - - call.caa = this_cpu_read(svsm_caa); - call.rcx = __pa(va); - - if (make_vmsa) { - /* Protocol 0, Call ID 2 */ - call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU); - call.rdx = __pa(caa); - call.r8 = apic_id; - } else { - /* Protocol 0, Call ID 3 */ - call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU); - } - - ret = svsm_perform_call_protocol(&call); - - local_irq_restore(flags); - } else { - /* - * If the kernel runs at VMPL0, it can change the VMSA - * bit for a page using the RMPADJUST instruction. - * However, for the instruction to succeed it must - * target the permissions of a lesser privileged (higher - * numbered) VMPL level, so use VMPL1. 
- */ - u64 attrs = 1; - - if (make_vmsa) - attrs |= RMPADJUST_VMSA_PAGE_BIT; - - ret = rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs); + /* Handle the case of a huge page containing the GHCB page */ + addr = (unsigned long)ghcb & page_level_mask(level); + set_pte_enc(pte, level, (void *)addr); + snp_set_memory_private(addr, (size / PAGE_SIZE)); } - - return ret; } #define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK) @@ -1166,24 +1281,10 @@ static void *snp_alloc_vmsa_page(int cpu) return page_address(p + 1); } -static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id) -{ - int err; - - err = snp_set_vmsa(vmsa, NULL, apic_id, false); - if (err) - pr_err("clear VMSA page failed (%u), leaking page\n", err); - else - free_page((unsigned long)vmsa); -} - static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip) { struct sev_es_save_area *cur_vmsa, *vmsa; - struct ghcb_state state; struct svsm_ca *caa; - unsigned long flags; - struct ghcb *ghcb; u8 sipi_vector; int cpu, ret; u64 cr4; @@ -1297,33 +1398,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip) } /* Issue VMGEXIT AP Creation NAE event */ - local_irq_save(flags); - - ghcb = __sev_get_ghcb(&state); - - vc_ghcb_invalidate(ghcb); - ghcb_set_rax(ghcb, vmsa->sev_features); - ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION); - ghcb_set_sw_exit_info_1(ghcb, - ((u64)apic_id << 32) | - ((u64)snp_vmpl << 16) | - SVM_VMGEXIT_AP_CREATE); - ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa)); - - sev_es_wr_ghcb_msr(__pa(ghcb)); - VMGEXIT(); - - if (!ghcb_sw_exit_info_1_is_valid(ghcb) || - lower_32_bits(ghcb->save.sw_exit_info_1)) { - pr_err("SNP AP Creation error\n"); - ret = -EINVAL; - } - - __sev_put_ghcb(&state); - - local_irq_restore(flags); - - /* Perform cleanup if there was an error */ + ret = vmgexit_ap_control(SVM_VMGEXIT_AP_CREATE, vmsa, apic_id); if (ret) { snp_cleanup_vmsa(vmsa, apic_id); vmsa = NULL; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 9b20acc0e932..8d86e91bd5e5 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2465,8 +2465,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_ setup_pebs_fixed_sample_data); } -static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) +static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) { + u64 pebs_enabled = cpuc->pebs_enabled & mask; struct perf_event *event; int bit; @@ -2477,7 +2478,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int * It needs to call intel_pmu_save_and_restart_reload() to * update the event->count for this case. 
*/ - for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { + for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { event = cpuc->events[bit]; if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); @@ -2512,7 +2513,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d } if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, size); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; } @@ -2626,7 +2627,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; } diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 39e61212ac9a..30144ef9ef02 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -75,7 +75,7 @@ #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */ #define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */ #define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */ -/* Free ( 3*32+ 6) */ +#define X86_FEATURE_ZEN6 ( 3*32+ 6) /* CPU based on Zen6 microarchitecture */ /* Free ( 3*32+ 7) */ #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */ #define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */ diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h index acb85b9346d8..0020d77a0800 100644 --- a/arch/x86/include/asm/sev-common.h +++ b/arch/x86/include/asm/sev-common.h @@ -116,7 +116,7 @@ enum psc_op { #define GHCB_MSR_VMPL_REQ 0x016 #define GHCB_MSR_VMPL_REQ_LEVEL(v) \ /* GHCBData[39:32] */ \ - (((u64)(v) & GENMASK_ULL(7, 0) << 32) | \ + ((((u64)(v) & GENMASK_ULL(7, 0)) << 32) | \ /* GHCBDdata[11:0] */ \ GHCB_MSR_VMPL_REQ) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 2b36379ff675..4e06baab40bb 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -472,6 +472,11 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) case 0x60 ... 0x7f: setup_force_cpu_cap(X86_FEATURE_ZEN5); break; + case 0x50 ... 0x5f: + case 0x90 ... 0xaf: + case 0xc0 ... 0xcf: + setup_force_cpu_cap(X86_FEATURE_ZEN6); + break; default: goto warn; } diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 7fa3965dcef1..bb8d99e717b9 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -566,7 +566,7 @@ static void __init lowmem_pfn_init(void) "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" #define MSG_HIGHMEM_TRIMMED \ - "Warning: only 4GB will be used. Support for for CONFIG_HIGHMEM64G was removed!\n" + "Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account: diff --git a/block/bio-integrity-auto.c b/block/bio-integrity-auto.c index e524c609be50..9c6657664792 100644 --- a/block/bio-integrity-auto.c +++ b/block/bio-integrity-auto.c @@ -9,6 +9,7 @@ * not aware of PI. 
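The sev-common.h hunk above is a one-character precedence fix: in C the shift binds tighter than the bitwise AND, so the original GHCB_MSR_VMPL_REQ_LEVEL() masked the value against GENMASK_ULL(7, 0) << 32 instead of shifting the masked value into GHCBData[39:32]. A stand-alone demonstration, with GENMASK_ULL defined locally only for the demo:

/* Demonstrates the precedence bug fixed in GHCB_MSR_VMPL_REQ_LEVEL above. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))      /* local copy for the demo */

#define PACK_BUGGY(v)  ((uint64_t)(v) & GENMASK_ULL(7, 0) << 32)    /* parses as v & (mask << 32) */
#define PACK_FIXED(v)  (((uint64_t)(v) & GENMASK_ULL(7, 0)) << 32)  /* mask first, then shift */

int main(void)
{
        unsigned int vmpl = 2;

        printf("buggy: 0x%016llx\n", (unsigned long long)PACK_BUGGY(vmpl)); /* 0 */
        printf("fixed: 0x%016llx\n", (unsigned long long)PACK_FIXED(vmpl)); /* 0x200000000 */
        return 0;
}

For any small VMPL value the buggy form evaluates to 0, so the requested level was silently dropped from the MSR protocol request.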
*/ #include <linux/blk-integrity.h> +#include <linux/t10-pi.h> #include <linux/workqueue.h> #include "blk.h" @@ -43,6 +44,29 @@ static void bio_integrity_verify_fn(struct work_struct *work) bio_endio(bio); } +#define BIP_CHECK_FLAGS (BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG) +static bool bip_should_check(struct bio_integrity_payload *bip) +{ + return bip->bip_flags & BIP_CHECK_FLAGS; +} + +static bool bi_offload_capable(struct blk_integrity *bi) +{ + switch (bi->csum_type) { + case BLK_INTEGRITY_CSUM_CRC64: + return bi->tuple_size == sizeof(struct crc64_pi_tuple); + case BLK_INTEGRITY_CSUM_CRC: + case BLK_INTEGRITY_CSUM_IP: + return bi->tuple_size == sizeof(struct t10_pi_tuple); + default: + pr_warn_once("%s: unknown integrity checksum type:%d\n", + __func__, bi->csum_type); + fallthrough; + case BLK_INTEGRITY_CSUM_NONE: + return false; + } +} + /** * __bio_integrity_endio - Integrity I/O completion function * @bio: Protected bio @@ -54,12 +78,12 @@ static void bio_integrity_verify_fn(struct work_struct *work) */ bool __bio_integrity_endio(struct bio *bio) { - struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_data *bid = container_of(bip, struct bio_integrity_data, bip); - if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) { + if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && + bip_should_check(bip)) { INIT_WORK(&bid->work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bid->work); return false; @@ -84,6 +108,7 @@ bool bio_integrity_prep(struct bio *bio) { struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk); struct bio_integrity_data *bid; + bool set_flags = true; gfp_t gfp = GFP_NOIO; unsigned int len; void *buf; @@ -100,19 +125,24 @@ bool bio_integrity_prep(struct bio *bio) switch (bio_op(bio)) { case REQ_OP_READ: - if (bi->flags & BLK_INTEGRITY_NOVERIFY) - return true; + if (bi->flags & BLK_INTEGRITY_NOVERIFY) { + if (bi_offload_capable(bi)) + return true; + set_flags = false; + } break; case REQ_OP_WRITE: - if (bi->flags & BLK_INTEGRITY_NOGENERATE) - return true; - /* * Zero the memory allocated to not leak uninitialized kernel * memory to disk for non-integrity metadata where nothing else * initializes the memory. 
*/ - if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE) + if (bi->flags & BLK_INTEGRITY_NOGENERATE) { + if (bi_offload_capable(bi)) + return true; + set_flags = false; + gfp |= __GFP_ZERO; + } else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE) gfp |= __GFP_ZERO; break; default: @@ -137,19 +167,21 @@ bool bio_integrity_prep(struct bio *bio) bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY; bip_set_seed(&bid->bip, bio->bi_iter.bi_sector); - if (bi->csum_type == BLK_INTEGRITY_CSUM_IP) - bid->bip.bip_flags |= BIP_IP_CHECKSUM; - if (bi->csum_type) - bid->bip.bip_flags |= BIP_CHECK_GUARD; - if (bi->flags & BLK_INTEGRITY_REF_TAG) - bid->bip.bip_flags |= BIP_CHECK_REFTAG; + if (set_flags) { + if (bi->csum_type == BLK_INTEGRITY_CSUM_IP) + bid->bip.bip_flags |= BIP_IP_CHECKSUM; + if (bi->csum_type) + bid->bip.bip_flags |= BIP_CHECK_GUARD; + if (bi->flags & BLK_INTEGRITY_REF_TAG) + bid->bip.bip_flags |= BIP_CHECK_REFTAG; + } if (bio_integrity_add_page(bio, virt_to_page(buf), len, offset_in_page(buf)) < len) goto err_end_io; /* Auto-generate integrity metadata if this is a write */ - if (bio_data_dir(bio) == WRITE) + if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip)) blk_integrity_generate(bio); else bid->saved_bio_iter = bio->bi_iter; diff --git a/block/bio.c b/block/bio.c index 4e6c85a33d74..4be592d37fb6 100644 --- a/block/bio.c +++ b/block/bio.c @@ -611,7 +611,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) { struct bio *bio; - if (nr_vecs > UIO_MAXIOV) + if (nr_vecs > BIO_MAX_INLINE_VECS) return NULL; return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); } diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5498a87249d3..e3f1a4852737 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, goto out_free_state; err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - } out_free_state: kfree_sensitive(state); diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index f0dad0c9ce33..cd24ccd20ba6 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -455,7 +455,7 @@ priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t if (ret < 0) return ret; - buf[size] = '\0'; + buf[ret] = '\0'; ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period, &process_quantum); if (ret != 4) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index f73ce6e13065..54676e3d82dd 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, sizeof(struct acpi_table_pptt)); proc_sz = sizeof(struct acpi_pptt_processor); - while ((unsigned long)entry + proc_sz < table_end) { + /* ignore subtable types that are smaller than a processor node */ + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && cpu_node->parent == node_entry) return 0; if (entry->length == 0) return 0; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, entry->length); - } return 1; } @@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he proc_sz = sizeof(struct acpi_pptt_processor); /* find the processor structure associated with this cpuid */ - while ((unsigned long)entry + proc_sz < table_end) { + while ((unsigned long)entry + proc_sz 
<= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; if (entry->length == 0) { pr_warn("Invalid zero length subtable\n"); break; } + /* entry->length may not equal proc_sz, revalidate the processor structure length */ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && acpi_cpu_id == cpu_node->acpi_processor_id && + (unsigned long)entry + entry->length <= table_end && + entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) && acpi_pptt_leaf_node(table_hdr, cpu_node)) { return (struct acpi_pptt_processor *)entry; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index f9032076bc06..dc104c025cd5 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1708,7 +1708,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag, * that ublk_dispatch_req() is always called */ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); - if (req && blk_mq_request_started(req)) + if (req && blk_mq_request_started(req) && req->tag == tag) return; spin_lock(&ubq->cancel_lock); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 7771edf54fb3..4ab32abf0f48 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -56,18 +56,6 @@ config BT_HCIBTUSB_POLL_SYNC Say Y here to enable USB poll_sync for Bluetooth USB devices by default. -config BT_HCIBTUSB_AUTO_ISOC_ALT - bool "Automatically adjust alternate setting for Isoc endpoints" - depends on BT_HCIBTUSB - default y if CHROME_PLATFORMS - help - Say Y here to automatically adjusting the alternate setting for - HCI_USER_CHANNEL whenever a SCO link is established. - - When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets - and configures isoc endpoint alternate setting automatically when - HCI_USER_CHANNEL is in use. 
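The pptt.c hunks above harden the subtable walk: an entry is now accepted when its fixed processor header ends exactly at table_end, and entry->length is additionally required to fit inside the table and to equal the header size plus the private-resource array before the node is trusted. The same shape of bounds-checked walk over variable-length records, reduced to a generic sketch with hypothetical types:

/* Generic sketch of a bounds-checked walk over variable-length records. */
#include <stddef.h>
#include <stdint.h>

struct rec_hdr {
        uint8_t type;
        uint8_t length; /* total record length, header included */
};

/* Returns the number of well-formed records; stops at the first bad one. */
static size_t walk_records(const uint8_t *buf, size_t len)
{
        const uint8_t *p = buf, *end = buf + len;
        size_t n = 0;

        /* A record is examined only if its fixed header fits entirely. */
        while ((size_t)(end - p) >= sizeof(struct rec_hdr)) {
                const struct rec_hdr *h = (const void *)p;

                if (h->length < sizeof(*h))             /* zero/short length: corrupt */
                        break;
                if (h->length > (size_t)(end - p))      /* body overruns the buffer */
                        break;

                n++;
                p += h->length;
        }
        return n;
}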
- config BT_HCIBTUSB_BCM bool "Broadcom protocol support" depends on BT_HCIBTUSB diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 48e2f400957b..55cc1652bfe4 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2719,7 +2719,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var) } __packed data; efi_status_t status; - unsigned long data_size = 0; + unsigned long data_size = sizeof(data); efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03, 0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31); @@ -2730,15 +2730,9 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var) return -EOPNOTSUPP; status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, - NULL); - - if (status != EFI_BUFFER_TOO_SMALL || !data_size) - return -EIO; - - status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size, &data); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS || data_size != sizeof(data)) return -ENXIO; *dsbr_var = data.dsbr; @@ -3688,7 +3682,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name) } EXPORT_SYMBOL_GPL(btintel_configure_setup); -int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) +static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) { struct intel_tlv *tlv = (void *)&skb->data[5]; @@ -3716,7 +3710,6 @@ int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) recv_frame: return hci_recv_frame(hdev, skb); } -EXPORT_SYMBOL_GPL(btintel_diagnostics); int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 2aece3effa4e..1d12c4113c66 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -277,7 +277,6 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev, int btintel_shutdown_combined(struct hci_dev *hdev); void btintel_hw_error(struct hci_dev *hdev, u8 code); void btintel_print_fseq_info(struct hci_dev *hdev); -int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb); #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -411,9 +410,4 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) static inline void btintel_print_fseq_info(struct hci_dev *hdev) { } - -static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) -{ - return -EOPNOTSUPP; -} #endif diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 0a759ea26fd3..50fe17f1e1d1 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -208,6 +208,96 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index, memcpy(buf->data, skb->data, tfd->size); } +static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev) +{ + struct btintel_pcie_data *data = hci_get_drvdata(hdev); + u16 cr_hia, cr_tia; + u32 reg, mbox_reg; + struct sk_buff *skb; + u8 buf[80]; + + skb = alloc_skb(1024, GFP_ATOMIC); + if (!skb) + return; + + snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---"); + bt_dev_dbg(hdev, "%s", buf); + skb_put_data(skb, buf, strlen(buf)); + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG); + snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg); + bt_dev_dbg(hdev, "%s", buf); + skb_put_data(skb, buf, strlen(buf)); + data->boot_stage_cache = reg; + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG); + snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg); + skb_put_data(skb, buf, strlen(buf)); + 
bt_dev_dbg(hdev, "%s", buf); + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG); + snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG); + snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + + /*Read the Mail box status and registers*/ + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG); + snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg); + skb_put_data(skb, buf, strlen(buf)); + if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) { + mbox_reg = btintel_pcie_rd_reg32(data, + BTINTEL_PCIE_CSR_MBOX_1_REG); + snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + } + + if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) { + mbox_reg = btintel_pcie_rd_reg32(data, + BTINTEL_PCIE_CSR_MBOX_2_REG); + snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + } + + if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) { + mbox_reg = btintel_pcie_rd_reg32(data, + BTINTEL_PCIE_CSR_MBOX_3_REG); + snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + } + + if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) { + mbox_reg = btintel_pcie_rd_reg32(data, + BTINTEL_PCIE_CSR_MBOX_4_REG); + snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + } + + cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM]; + cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; + snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + + cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; + cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM]; + snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia); + skb_put_data(skb, buf, strlen(buf)); + bt_dev_dbg(hdev, "%s", buf); + snprintf(buf, sizeof(buf), "--------------------------------"); + bt_dev_dbg(hdev, "%s", buf); + + hci_recv_diag(hdev, skb); +} + static int btintel_pcie_send_sync(struct btintel_pcie_data *data, struct sk_buff *skb) { @@ -237,8 +327,11 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data, /* Wait for the complete interrupt - URBD0 */ ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done, msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS)); - if (!ret) + if (!ret) { + bt_dev_err(data->hdev, "tx completion timeout"); + btintel_pcie_dump_debug_registers(data->hdev); return -ETIME; + } return 0; } @@ -756,6 +849,26 @@ static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data, return 0; } +static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data) +{ + return (data->boot_stage_cache & + BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) || + (data->boot_stage_cache & + BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN); +} + +static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data) +{ + return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) || + (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER); +} + +static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data) +{ + bt_dev_err(data->hdev, "Received gp1 mailbox interrupt"); + 
btintel_pcie_dump_debug_registers(data->hdev); +} + /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response. */ @@ -779,6 +892,18 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data) if (reg != data->img_resp_cache) data->img_resp_cache = reg; + if (btintel_pcie_in_error(data)) { + bt_dev_err(data->hdev, "Controller in error state"); + btintel_pcie_dump_debug_registers(data->hdev); + return; + } + + if (btintel_pcie_in_lockdown(data)) { + bt_dev_err(data->hdev, "Controller in lockdown state"); + btintel_pcie_dump_debug_registers(data->hdev); + return; + } + data->gp0_received = true; old_ctxt = data->alive_intr_ctxt; @@ -889,7 +1014,6 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data) static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *)skb->data; - const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 }; struct btintel_pcie_data *data = hci_get_drvdata(hdev); if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && @@ -945,15 +1069,6 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb) } } - /* Handle all diagnostics events separately. May still call - * hci_recv_frame. - */ - if (len >= sizeof(diagnostics_hdr) && - memcmp(&skb->data[2], diagnostics_hdr, - sizeof(diagnostics_hdr)) == 0) { - return btintel_diagnostics(hdev, skb); - } - /* This is a debug event that comes from IML and OP image when it * starts execution. There is no need pass this event to stack. */ @@ -1343,6 +1458,9 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP) btintel_pcie_msix_hw_exp_handler(data); + if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1) + btintel_pcie_msix_gp1_handler(data); + /* This interrupt is triggered by the firmware after updating * boot_stage register and image_response register */ @@ -2028,6 +2146,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev) while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) { bt_dev_err(hdev, "Firmware download retry count: %d", fw_dl_retry); + btintel_pcie_dump_debug_registers(hdev); err = btintel_pcie_reset_bt(data); if (err) { bt_dev_err(hdev, "Failed to do shr reset: %d", err); diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h index 873178019cad..21b964b15c1c 100644 --- a/drivers/bluetooth/btintel_pcie.h +++ b/drivers/bluetooth/btintel_pcie.h @@ -12,10 +12,17 @@ #define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028) #define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C) #define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108) +#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C) +#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110) #define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114) #define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118) #define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C) #define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C) +#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170) +#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174) +#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178) +#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C) +#define 
BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180) #define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440) #define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458) #define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460) @@ -41,6 +48,9 @@ #define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2)) #define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10)) #define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13)) +#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14)) #define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16)) #define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23)) #define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24)) @@ -89,6 +99,7 @@ enum msix_fh_int_causes { /* Causes for the HW register interrupts */ enum msix_hw_int_causes { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */ + BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */ }; @@ -121,6 +132,14 @@ enum btintel_pcie_tlv_type { BTINTEL_FW_BUILD, }; +/* causes for the MBOX interrupts */ +enum msix_mbox_int_causes { + BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */ + BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */ + BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */ + BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */ +}; + #define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) /* Minimum and Maximum number of MSI-X Vector diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 07cd308f7abf..93932a0d8625 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -100,7 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev, } /* Configure wakeup (enabled by default) */ - device_init_wakeup(dev, true); + ret = devm_device_init_wakeup(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to init wakeup\n"); } } diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 1d26207b2ba7..c16a3518b8ff 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1414,7 +1414,7 @@ static int btmtksdio_probe(struct sdio_func *func, */ pm_runtime_put_noidle(bdev->dev); - err = device_init_wakeup(bdev->dev, true); + err = devm_device_init_wakeup(bdev->dev); if (err) bt_dev_err(hdev, "failed to initialize device wakeup"); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 604ab2bba231..b34623a69b8a 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -17,6 +17,7 @@ #include <linux/crc32.h> #include <linux/string_helpers.h> #include <linux/gpio/consumer.h> +#include <linux/of_irq.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -143,7 +144,9 @@ struct ps_data { bool driver_sent_cmd; u16 h2c_ps_interval; u16 c2h_ps_interval; + bool wakeup_source; struct gpio_desc *h2c_ps_gpio; + s32 irq_handler; struct hci_dev *hdev; struct work_struct work; struct timer_list ps_timer; @@ -476,12 +479,21 @@ static void ps_timeout_func(struct timer_list *t) } } +static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv) +{ + struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv; + + bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt"); + return IRQ_HANDLED; +} static int ps_setup(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = 
hci_get_drvdata(hdev); struct serdev_device *serdev = nxpdev->serdev; struct ps_data *psdata = &nxpdev->psdata; + int ret; + /* Out-Of-Band Device Wakeup */ psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup", GPIOD_OUT_LOW); if (IS_ERR(psdata->h2c_ps_gpio)) { @@ -493,11 +505,37 @@ static int ps_setup(struct hci_dev *hdev) if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) { psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */ } else if (!psdata->h2c_ps_gpio) { - bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO"); + bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios"); psdata->h2c_wakeup_gpio = 0xff; } - device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio); + /* Out-Of-Band Host Wakeup */ + if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) { + psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup"); + bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler); + if (psdata->irq_handler > 0) + psdata->wakeup_source = true; + } + + if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) { + psdata->c2h_wakeup_gpio = 0xff; + if (psdata->wakeup_source) { + bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin"); + psdata->wakeup_source = false; + } + } else if (!psdata->wakeup_source) { + bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt"); + psdata->c2h_wakeup_gpio = 0xff; + } + + if (psdata->wakeup_source) { + ret = devm_request_irq(&serdev->dev, psdata->irq_handler, + ps_host_wakeup_irq_handler, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + dev_name(&serdev->dev), nxpdev); + disable_irq(psdata->irq_handler); + device_init_wakeup(&serdev->dev, true); + } psdata->hdev = hdev; INIT_WORK(&psdata->work, ps_work_func); @@ -637,12 +675,10 @@ static void ps_init(struct hci_dev *hdev) psdata->ps_state = PS_STATE_AWAKE; - if (psdata->c2h_wakeup_gpio) { + if (psdata->c2h_wakeup_gpio != 0xff) psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO; - } else { + else psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; - psdata->c2h_wakeup_gpio = 0xff; - } psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; if (psdata->h2c_ps_gpio) @@ -1821,6 +1857,11 @@ static int nxp_serdev_suspend(struct device *dev) struct ps_data *psdata = &nxpdev->psdata; ps_control(psdata->hdev, PS_STATE_SLEEP); + + if (psdata->wakeup_source) { + enable_irq_wake(psdata->irq_handler); + enable_irq(psdata->irq_handler); + } return 0; } @@ -1829,6 +1870,11 @@ static int nxp_serdev_resume(struct device *dev) struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); struct ps_data *psdata = &nxpdev->psdata; + if (psdata->wakeup_source) { + disable_irq(psdata->irq_handler); + disable_irq_wake(psdata->irq_handler); + } + ps_control(psdata->hdev, PS_STATE_AWAKE); return 0; } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a42dedb78e0a..9ab661d2d1e6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -21,6 +21,7 @@ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_drv.h> #include "btintel.h" #include "btbcm.h" @@ -34,7 +35,6 @@ static bool force_scofix; static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND); static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC); static bool reset = true; -static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT); static struct 
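The btnxpuart hunks above wire an out-of-band host-wakeup interrupt into the serdev suspend/resume path: the IRQ is requested at setup but immediately disabled, and is only armed (enable_irq_wake() plus enable_irq()) for the duration of system suspend. A condensed kernel-style sketch of that arm/disarm pattern; the example_* names are placeholders and the driver's power-save state machine is omitted:

/* Sketch, assuming kernel context: arm a wake IRQ only across suspend. */
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_wakeup.h>

struct example_priv {
        int wake_irq;           /* e.g. from of_irq_get_byname(np, "wakeup") */
        bool wakeup_source;     /* "wakeup-source" DT property present */
};

static irqreturn_t example_wake_handler(int irq, void *priv)
{
        /* Nothing to do: the wake itself is the event. */
        return IRQ_HANDLED;
}

static int example_setup(struct device *dev, struct example_priv *p)
{
        int ret;

        if (!p->wakeup_source)
                return 0;

        ret = devm_request_irq(dev, p->wake_irq, example_wake_handler,
                               IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
                               dev_name(dev), p);
        if (ret)
                return ret;

        disable_irq(p->wake_irq);       /* armed only while suspended */
        device_init_wakeup(dev, true);
        return 0;
}

static int example_suspend(struct example_priv *p)
{
        if (p->wakeup_source) {
                enable_irq_wake(p->wake_irq);
                enable_irq(p->wake_irq);
        }
        return 0;
}

static int example_resume(struct example_priv *p)
{
        if (p->wakeup_source) {
                disable_irq(p->wake_irq);
                disable_irq_wake(p->wake_irq);
        }
        return 0;
}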
usb_driver btusb_driver; @@ -513,6 +513,7 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, /* Realtek 8851BE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK }, /* Realtek 8852AE Bluetooth devices */ @@ -678,6 +679,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK | @@ -716,8 +719,12 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, @@ -1118,42 +1125,6 @@ static inline void btusb_free_frags(struct btusb_data *data) spin_unlock_irqrestore(&data->rxlock, flags); } -static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb) -{ - struct hci_event_hdr *hdr = (void *) skb->data; - struct hci_ev_sync_conn_complete *ev = - (void *) skb->data + sizeof(*hdr); - struct hci_dev *hdev = data->hdev; - unsigned int notify_air_mode; - - if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT) - return; - - if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE) - return; - - if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status) - return; - - switch (ev->air_mode) { - case BT_CODEC_CVSD: - notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD; - break; - - case BT_CODEC_TRANSPARENT: - notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP; - break; - - default: - return; - } - - bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode); - data->sco_num = 1; - data->air_mode = notify_air_mode; - schedule_work(&data->work); -} - static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb) { if (data->intr_interval) { @@ -1161,10 +1132,6 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb) schedule_delayed_work(&data->rx_work, 0); } - /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */ - if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL)) - btusb_sco_connected(data, skb); - return data->recv_event(data->hdev, skb); } @@ -3014,9 +2981,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev) static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { int ret = 0; + unsigned int skip = 0; u8 pkt_type; - u8 *sk_ptr; - unsigned int sk_len; u16 seqno; u32 dump_size; @@ -3025,18 +2991,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) struct usb_device *udev = btdata->udev; pkt_type = hci_skb_pkt_type(skb); - sk_ptr = skb->data; - sk_len = skb->len; - - if (pkt_type == HCI_ACLDATA_PKT) { - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - } + skip = sizeof(struct hci_event_hdr); + if (pkt_type == HCI_ACLDATA_PKT) + skip += sizeof(struct hci_acl_hdr); - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + 
skb_pull(skb, skip); + dump_hdr = (struct qca_dump_hdr *)skb->data; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); @@ -3056,16 +3017,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) btdata->qca_dump.ram_dump_size = dump_size; btdata->qca_dump.ram_dump_seqno = 0; - sk_ptr += offsetof(struct qca_dump_hdr, data0); - sk_len -= offsetof(struct qca_dump_hdr, data0); + + skb_pull(skb, offsetof(struct qca_dump_hdr, data0)); usb_disable_autosuspend(udev); bt_dev_info(hdev, "%s memdump size(%u)\n", (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event", dump_size); } else { - sk_ptr += offsetof(struct qca_dump_hdr, data); - sk_len -= offsetof(struct qca_dump_hdr, data); + skb_pull(skb, offsetof(struct qca_dump_hdr, data)); } if (!btdata->qca_dump.ram_dump_size) { @@ -3085,7 +3045,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) return ret; } - skb_pull(skb, skb->len - sk_len); hci_devcd_append(hdev, skb); btdata->qca_dump.ram_dump_seqno++; if (seqno == QCA_LAST_SEQUENCE_NUM) { @@ -3113,68 +3072,58 @@ out: /* Return: true if the ACL packet is a dump packet, false otherwise. */ static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; - sk_ptr = skb->data; - sk_len = skb->len; - - acl_hdr = hci_acl_hdr(skb); - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + if (!clone) return false; - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - event_hdr = (struct hci_event_hdr *)sk_ptr; - - if ((event_hdr->evt != HCI_VENDOR_PKT) || - (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) - return false; + acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); + if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) + goto out; - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; - return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; } /* Return: true if the event packet is a dump packet, false otherwise. 
*/ static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false; - sk_ptr = skb->data; - sk_len = skb->len; - - event_hdr = hci_event_hdr(skb); - - if ((event_hdr->evt != HCI_VENDOR_PKT) - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + if (!clone) return false; - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out; - dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out; - return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; } static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) @@ -3771,31 +3720,133 @@ static const struct file_operations force_poll_sync_fops = { .llseek = default_llseek, }; -static ssize_t isoc_alt_show(struct device *dev, - struct device_attribute *attr, - char *buf) +#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \ + hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000) +#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0 +struct btusb_hci_drv_rp_supported_altsettings { + __u8 num; + __u8 altsettings[]; +} __packed; + +#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \ + hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001) +#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1 +struct btusb_hci_drv_cmd_switch_altsetting { + __u8 altsetting; +} __packed; + +static const struct { + u16 opcode; + const char *desc; +} btusb_hci_drv_supported_commands[] = { + /* Common commands */ + { HCI_DRV_OP_READ_INFO, "Read Info" }, + + /* Driver specific commands */ + { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" }, + { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" }, +}; +static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct hci_drv_rp_read_info *rp; + size_t rp_size; + int err, i; + u16 opcode, num_supported_commands = + ARRAY_SIZE(btusb_hci_drv_supported_commands); + + rp_size = sizeof(*rp) + num_supported_commands * 2; + + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + strscpy_pad(rp->driver_name, btusb_driver.name); + + rp->num_supported_commands = cpu_to_le16(num_supported_commands); + for (i = 0; i < num_supported_commands; i++) { + opcode = btusb_hci_drv_supported_commands[i].opcode; + bt_dev_info(hdev, + "Supported HCI Drv command (0x%02x|0x%04x): %s", + hci_opcode_ogf(opcode), + hci_opcode_ocf(opcode), + btusb_hci_drv_supported_commands[i].desc); + rp->supported_commands[i] = cpu_to_le16(opcode); + } + + err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO, + HCI_DRV_STATUS_SUCCESS, rp, rp_size); + + kfree(rp); + return err; +} + +static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data, + u16 data_len) { - struct btusb_data *data = dev_get_drvdata(dev); + struct btusb_data *drvdata = hci_get_drvdata(hdev); + struct btusb_hci_drv_rp_supported_altsettings *rp; + size_t rp_size; + int err; + u8 i; + + /* There are at most 7 alt (0 - 6) */ + rp = kmalloc(sizeof(*rp) 
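The acl_pkt_is_dump_qca()/evt_pkt_is_dump_qca() rewrites above drop the hand-maintained pointer and length bookkeeping and instead clone the skb and peel headers off the clone with skb_pull_data(), which returns NULL when too few bytes remain, so every step doubles as a length check and the original skb stays untouched. A reduced sketch of that idiom with a hypothetical vendor header, assuming kernel context:

/* Sketch, kernel context: validate stacked headers without touching the skb. */
#include <linux/skbuff.h>

struct example_vendor_hdr {
        u8 class;
        u8 msg_type;
} __packed;

static bool example_pkt_has_vendor_hdr(struct sk_buff *skb, u8 want_class)
{
        struct example_vendor_hdr *vhdr;
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
        bool match = false;

        if (!clone)
                return false;

        /* skb_pull_data() returns NULL if the clone is shorter than the
         * header, so no explicit length arithmetic is needed. */
        vhdr = skb_pull_data(clone, sizeof(*vhdr));
        if (!vhdr || vhdr->class != want_class)
                goto out;

        match = true;
out:
        consume_skb(clone);
        return match;
}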
+ 7, GFP_KERNEL); + + rp->num = 0; + if (!drvdata->isoc) + goto done; + + for (i = 0; i <= 6; i++) { + if (btusb_find_altsetting(drvdata, i)) + rp->altsettings[rp->num++] = i; + } - return sysfs_emit(buf, "%d\n", data->isoc_altsetting); +done: + rp_size = sizeof(*rp) + rp->num; + + err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, + HCI_DRV_STATUS_SUCCESS, rp, rp_size); + kfree(rp); + return err; } -static ssize_t isoc_alt_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data, + u16 data_len) { - struct btusb_data *data = dev_get_drvdata(dev); - int alt; - int ret; + struct btusb_hci_drv_cmd_switch_altsetting *cmd = data; + u8 status; - if (kstrtoint(buf, 10, &alt)) - return -EINVAL; + if (cmd->altsetting > 6) { + status = HCI_DRV_STATUS_INVALID_PARAMETERS; + } else { + if (btusb_switch_alt_setting(hdev, cmd->altsetting)) + status = HCI_DRV_STATUS_UNSPECIFIED_ERROR; + else + status = HCI_DRV_STATUS_SUCCESS; + } - ret = btusb_switch_alt_setting(data->hdev, alt); - return ret < 0 ? ret : count; + return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, + status); } -static DEVICE_ATTR_RW(isoc_alt); +static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = { + { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE }, +}; + +static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = { + { btusb_hci_drv_supported_altsettings, + BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE }, + { btusb_hci_drv_switch_altsetting, + BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE }, +}; + +static struct hci_drv btusb_hci_drv = { + .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers), + .common_handlers = btusb_hci_drv_common_handlers, + .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers), + .specific_handlers = btusb_hci_drv_specific_handlers, +}; static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) @@ -3936,12 +3987,13 @@ static int btusb_probe(struct usb_interface *intf, data->reset_gpio = reset_gpio; } - hdev->open = btusb_open; - hdev->close = btusb_close; - hdev->flush = btusb_flush; - hdev->send = btusb_send_frame; - hdev->notify = btusb_notify; - hdev->wakeup = btusb_wakeup; + hdev->open = btusb_open; + hdev->close = btusb_close; + hdev->flush = btusb_flush; + hdev->send = btusb_send_frame; + hdev->notify = btusb_notify; + hdev->wakeup = btusb_wakeup; + hdev->hci_drv = &btusb_hci_drv; #ifdef CONFIG_PM err = btusb_config_oob_wake(hdev); @@ -4160,10 +4212,6 @@ static int btusb_probe(struct usb_interface *intf, data->isoc, data); if (err < 0) goto out_free_dev; - - err = device_create_file(&intf->dev, &dev_attr_isoc_alt); - if (err) - goto out_free_dev; } if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) { @@ -4210,10 +4258,8 @@ static void btusb_disconnect(struct usb_interface *intf) hdev = data->hdev; usb_set_intfdata(data->intf, NULL); - if (data->isoc) { - device_remove_file(&intf->dev, &dev_attr_isoc_alt); + if (data->isoc) usb_set_intfdata(data->isoc, NULL); - } if (data->diag) usb_set_intfdata(data->diag, NULL); diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c index dc9541e76d81..1394c575aa6d 100644 --- a/drivers/bluetooth/hci_aml.c +++ b/drivers/bluetooth/hci_aml.c @@ -313,8 +313,7 @@ static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name) goto exit; exit: - if (firmware) - release_firmware(firmware); + release_firmware(firmware); return ret; } 
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 014db6386624..8ddf3a9a53df 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (!clk_data) return -ENOMEM; + clk_data->num = S2MPS11_CLKS_NUM; + switch (hwid) { case S2MPS11X: s2mps11_reg = S2MPS11_REG_RTC_CTRL; @@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) clk_data->hws[i] = &s2mps11_clks[i].hw; } - clk_data->num = S2MPS11_CLKS_NUM; of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, clk_data); diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c index 595e010341f7..be703f250197 100644 --- a/drivers/clk/rockchip/clk-rk3576.c +++ b/drivers/clk/rockchip/clk-rk3576.c @@ -541,6 +541,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { RK3576_CLKGATE_CON(5), 14, GFLAGS), GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0, RK3576_CLKGATE_CON(5), 15, GFLAGS), + GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0, + RK3576_CLKGATE_CON(6), 0, GFLAGS), COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0, RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS, RK3576_CLKGATE_CON(6), 3, GFLAGS), diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index bb66c906ebbb..e83d4fd40240 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { { .hw = &pll_periph0_2x_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); - -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", + mmc0_mmc1_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", + mmc0_mmc1_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static const struct clk_parent_data mmc2_parents[] = { { .fw_name = "hosc" }, @@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { { .hw = &pll_periph0_800M_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, + 0x838, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, 0x84c, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index b35aeec70484..bb09c649bfa3 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -52,6 +52,28 @@ struct ccu_mp { } \ } +#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ + _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _postdiv, _flags)\ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = 
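The clk-s2mps11 hunk above moves the clk_data->num assignment ahead of the loop that populates clk_data->hws[]. This ordering matters when the flexible array is annotated with __counted_by(num), since bounds instrumentation then checks every hws[i] access against the current value of num; that annotation on clk_hw_onecell_data is an assumption here, not shown in this diff. A generic sketch of the ordering rule, with made-up types:

/* Sketch: with __counted_by(), set the count before filling the array. */
#include <linux/slab.h>
#include <linux/overflow.h>

struct example_table {
        unsigned int num;
        void *slots[] __counted_by(num);
};

static struct example_table *example_table_alloc(unsigned int n)
{
        struct example_table *t;
        unsigned int i;

        t = kzalloc(struct_size(t, slots, n), GFP_KERNEL);
        if (!t)
                return NULL;

        /* Bounds checks derive from ->num, so it must be valid first ... */
        t->num = n;

        /* ... and only then may slots[0..n-1] be populated. */
        for (i = 0; i < n; i++)
                t->slots[i] = NULL;

        return t;
}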
_SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ + .fixed_post_div = _postdiv, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ _mshift, _mwidth, \ _pshift, _pwidth, \ @@ -109,8 +131,7 @@ struct ccu_mp { _mshift, _mwidth, \ _pshift, _pwidth, \ _muxshift, _muxwidth, \ - _gate, _features, \ - _flags) \ + _gate, _flags, _features) \ struct ccu_mp _struct = { \ .enable = _gate, \ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 5f8d010516f0..b1ef4546346d 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, count++; dma_resv_list_set(fobj, i, fence, usage); - /* pointer update must be visible before we extend the num_fences */ - smp_store_mb(fobj->num_fences, count); + /* fence update must be visible before we extend the num_fences */ + smp_wmb(); + fobj->num_fences = count; } EXPORT_SYMBOL(dma_resv_add_fence); diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c index 715ac3ae067b..81339664036f 100644 --- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c @@ -342,6 +342,9 @@ static void pt_cmd_callback_work(void *data, int err) struct pt_dma_chan *chan; unsigned long flags; + if (!desc) + return; + dma_chan = desc->vd.tx.chan; chan = to_pt_chan(dma_chan); @@ -355,16 +358,14 @@ static void pt_cmd_callback_work(void *data, int err) desc->status = DMA_ERROR; spin_lock_irqsave(&chan->vc.lock, flags); - if (desc) { - if (desc->status != DMA_COMPLETE) { - if (desc->status != DMA_ERROR) - desc->status = DMA_COMPLETE; + if (desc->status != DMA_COMPLETE) { + if (desc->status != DMA_ERROR) + desc->status = DMA_COMPLETE; - dma_cookie_complete(tx_desc); - dma_descriptor_unmap(tx_desc); - } else { - tx_desc = NULL; - } + dma_cookie_complete(tx_desc); + dma_descriptor_unmap(tx_desc); + } else { + tx_desc = NULL; } spin_unlock_irqrestore(&chan->vc.lock, flags); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d891dfca358e..91b2fbc0b864 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -841,9 +841,9 @@ static int dmatest_func(void *data) } else { dma_async_issue_pending(chan); - wait_event_timeout(thread->done_wait, - done->done, - msecs_to_jiffies(params->timeout)); + wait_event_freezable_timeout(thread->done_wait, + done->done, + msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 756d67325db5..66bfa28d984e 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -57,7 +57,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id) intr = edma_readl_chreg(fsl_chan, ch_int); if (!intr) - return IRQ_HANDLED; + return IRQ_NONE; edma_writel_chreg(fsl_chan, 1, ch_int); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ff94ee892339..6d12033649f8 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) struct idxd_wq *wq; struct device *dev, *fdev; int rc = 0; - struct iommu_sva *sva; + struct 
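The dma-resv hunk above replaces smp_store_mb() with an explicit smp_wmb() followed by a plain store: publishing a new fence only needs the slot write ordered before the num_fences update, not a full barrier. The general publish pattern pairs that write barrier with a read barrier on the consumer side; a generic sketch assuming kernel context, with example_* placeholders (dma-resv readers themselves rely on RCU rather than this exact helper):

/* Sketch of the publish/observe ordering used above (kernel context). */
#include <linux/compiler.h>
#include <asm/barrier.h>

struct example_array {
        unsigned int num;       /* how many slots readers may look at */
        void *slot[16];
};

static void example_publish(struct example_array *a, void *item)
{
        unsigned int i = a->num;

        a->slot[i] = item;
        /* Make the slot write visible before the count that exposes it. */
        smp_wmb();
        WRITE_ONCE(a->num, i + 1);
}

static void *example_peek_last(struct example_array *a)
{
        unsigned int n = READ_ONCE(a->num);

        if (!n)
                return NULL;
        /* Pairs with the smp_wmb() in example_publish(). */
        smp_rmb();
        return a->slot[n - 1];
}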
iommu_sva *sva = NULL; unsigned int pasid; struct idxd_cdev *idxd_cdev; @@ -317,7 +317,7 @@ failed_set_pasid: if (device_user_pasid_enabled(idxd)) idxd_xa_pasid_remove(ctx); failed_get_pasid: - if (device_user_pasid_enabled(idxd)) + if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva)) iommu_sva_unbind_device(sva); failed: mutex_unlock(&wq->wq_lock); @@ -407,6 +407,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) return -EPERM; + if (current->mm != ctx->mm) + return -EPERM; + rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; @@ -473,6 +476,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t ssize_t written = 0; int i; + if (current->mm != ctx->mm) + return -EPERM; + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { int rc = idxd_submit_user_descriptor(ctx, udesc + i); @@ -493,6 +499,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, struct idxd_device *idxd = wq->idxd; __poll_t out = 0; + if (current->mm != ctx->mm) + return POLLNVAL; + poll_wait(filp, &wq->err_queue, wait); spin_lock(&idxd->dev_lock); if (idxd->sw_err.valid) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index fca1d2924999..760b7d81fcd8 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd) pci_free_irq_vectors(pdev); } +static void idxd_clean_wqs(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); + conf_dev = wq_confdev(wq); + put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + kfree(idxd->wqs); +} + static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); if (!idxd->wq_enable_map) { - kfree(idxd->wqs); - return -ENOMEM; + rc = -ENOMEM; + goto err_bitmap; } for (i = 0; i < idxd->max_wqs; i++) { @@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_wq_device_type; rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); - if (rc < 0) { - put_device(conf_dev); + if (rc < 0) goto err; - } mutex_init(&wq->wq_lock); init_waitqueue_head(&wq->err_queue); @@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd) wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { - put_device(conf_dev); rc = -ENOMEM; goto err; } @@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) if (idxd->hw.wq_cap.op_config) { wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); if (!wq->opcap_bmap) { - put_device(conf_dev); rc = -ENOMEM; - goto err; + goto err_opcap_bmap; } bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } @@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd) return 0; - err: +err_opcap_bmap: + kfree(wq->wqcfg); + +err: + put_device(conf_dev); + kfree(wq); + while (--i >= 0) { wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); conf_dev = wq_confdev(wq); put_device(conf_dev); + kfree(wq); + } + 
bitmap_free(idxd->wq_enable_map); + +err_bitmap: + kfree(idxd->wqs); + return rc; } +static void idxd_clean_engines(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_engines; i++) { + engine = idxd->engines[i]; + conf_dev = engine_confdev(engine); + put_device(conf_dev); + kfree(engine); + } + kfree(idxd->engines); +} + static int idxd_setup_engines(struct idxd_device *idxd) { struct idxd_engine *engine; @@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); if (rc < 0) { put_device(conf_dev); + kfree(engine); goto err; } @@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd) engine = idxd->engines[i]; conf_dev = engine_confdev(engine); put_device(conf_dev); + kfree(engine); } + kfree(idxd->engines); + return rc; } +static void idxd_clean_groups(struct idxd_device *idxd) +{ + struct idxd_group *group; + int i; + + for (i = 0; i < idxd->max_groups; i++) { + group = idxd->groups[i]; + put_device(group_confdev(group)); + kfree(group); + } + kfree(idxd->groups); +} + static int idxd_setup_groups(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); if (rc < 0) { put_device(conf_dev); + kfree(group); goto err; } @@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd) while (--i >= 0) { group = idxd->groups[i]; put_device(group_confdev(group)); + kfree(group); } + kfree(idxd->groups); + return rc; } static void idxd_cleanup_internals(struct idxd_device *idxd) { - int i; - - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_groups(idxd); + idxd_clean_engines(idxd); + idxd_clean_wqs(idxd); destroy_workqueue(idxd->wq); } @@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd) static int idxd_setup_internals(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int rc, i; + int rc; init_waitqueue_head(&idxd->cmd_waitq); @@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd) err_evl: destroy_workqueue(idxd->wq); err_wkq_create: - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); + idxd_clean_groups(idxd); err_group: - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); + idxd_clean_engines(idxd); err_engine: - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_wqs(idxd); err_wqs: return rc; } @@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd) idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); } +static void idxd_free(struct idxd_device *idxd) +{ + if (!idxd) + return; + + put_device(idxd_confdev(idxd)); + bitmap_free(idxd->opcap_bmap); + ida_free(&idxd_ida, idxd->id); + kfree(idxd); +} + static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) { struct device *dev = &pdev->dev; @@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); if (idxd->id < 0) - 
return NULL; + goto err_ida; idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); - if (!idxd->opcap_bmap) { - ida_free(&idxd_ida, idxd->id); - return NULL; - } + if (!idxd->opcap_bmap) + goto err_opcap; device_initialize(conf_dev); conf_dev->parent = dev; conf_dev->bus = &dsa_bus_type; conf_dev->type = idxd->data->dev_type; rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); - if (rc < 0) { - put_device(conf_dev); - return NULL; - } + if (rc < 0) + goto err_name; spin_lock_init(&idxd->dev_lock); spin_lock_init(&idxd->cmd_lock); return idxd; + +err_name: + put_device(conf_dev); + bitmap_free(idxd->opcap_bmap); +err_opcap: + ida_free(&idxd_ida, idxd->id); +err_ida: + kfree(idxd); + + return NULL; } static int idxd_enable_system_pasid(struct idxd_device *idxd) @@ -1190,7 +1266,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev, err: pci_iounmap(pdev, idxd->reg_base); err_iomap: - put_device(idxd_confdev(idxd)); + idxd_free(idxd); err_idxd_alloc: pci_disable_device(pdev); return rc; @@ -1232,7 +1308,6 @@ static void idxd_shutdown(struct pci_dev *pdev) static void idxd_remove(struct pci_dev *pdev) { struct idxd_device *idxd = pci_get_drvdata(pdev); - struct idxd_irq_entry *irq_entry; idxd_unregister_devices(idxd); /* @@ -1245,20 +1320,12 @@ static void idxd_remove(struct pci_dev *pdev) get_device(idxd_confdev(idxd)); device_unregister(idxd_confdev(idxd)); idxd_shutdown(pdev); - if (device_pasid_enabled(idxd)) - idxd_disable_system_pasid(idxd); idxd_device_remove_debugfs(idxd); - - irq_entry = idxd_get_ie(idxd, 0); - free_irq(irq_entry->vector, irq_entry); - pci_free_irq_vectors(pdev); + idxd_cleanup(idxd); pci_iounmap(pdev, idxd->reg_base); - if (device_user_pasid_enabled(idxd)) - idxd_disable_sva(pdev); - pci_disable_device(pdev); - destroy_workqueue(idxd->wq); - perfmon_pmu_remove(idxd); put_device(idxd_confdev(idxd)); + idxd_free(idxd); + pci_disable_device(pdev); } static struct pci_driver idxd_pci_driver = { diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index d5ddb4e30e71..47c8adfdc155 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, { struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); struct virt_dma_desc *vd; - unsigned long flags; - spin_lock_irqsave(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->pc->queue, node) if (vd->tx.cookie == cookie) { - spin_unlock_irqrestore(&cvc->pc->lock, flags); return vd; } - spin_unlock_irqrestore(&cvc->pc->lock, flags); list_for_each_entry(vd, &cvc->vc.desc_issued, node) if (vd->tx.cookie == cookie) @@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, if (ret == DMA_COMPLETE || !txstate) return ret; + spin_lock_irqsave(&cvc->pc->lock, flags); spin_lock_irqsave(&cvc->vc.lock, flags); vd = mtk_cqdma_find_active_desc(c, cookie); spin_unlock_irqrestore(&cvc->vc.lock, flags); + spin_unlock_irqrestore(&cvc->pc->lock, flags); if (vd) { cvd = to_cqdma_vdesc(vd); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index b223a7aacb0c..b6255c0601bb 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work) u32 residue_diff; ktime_t time_diff; unsigned long delay; + unsigned long flags; while (1) { + spin_lock_irqsave(&uc->vc.lock, flags); + if (uc->desc) { /* Get previous 
residue and time stamp */ residue_diff = uc->tx_drain.residue; @@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + spin_unlock_irqrestore(&uc->vc.lock, flags); + usleep_range(ktime_to_us(delay), ktime_to_us(delay) + 10); continue; @@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + + spin_unlock_irqrestore(&uc->vc.lock, flags); } static irqreturn_t udma_ring_irq_handler(int irq, void *data) @@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct udma_dev *ud = ofdma->of_dma_data; - dma_cap_mask_t mask = ud->ddev.cap_mask; struct udma_filter_param filter_param; struct dma_chan *chan; @@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, } } - chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, + chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, ofdma->of_node); if (!chan) { dev_err(ud->dev, "get channel fail in %s.\n", __func__); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 442435ded020..13cc120cf11f 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -1204,6 +1204,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip) guard(mutex)(&chip->i2c_lock); + if (chip->client->irq > 0) + enable_irq(chip->client->irq); regcache_cache_only(chip->regmap, false); regcache_mark_dirty(chip->regmap); ret = pca953x_regcache_sync(chip); @@ -1216,6 +1218,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip) static void pca953x_save_context(struct pca953x_chip *chip) { guard(mutex)(&chip->i2c_lock); + + /* Disable IRQ to prevent early triggering while regmap "cache only" is on */ + if (chip->client->irq > 0) + disable_irq(chip->client->irq); regcache_cache_only(chip->regmap, true); } diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index 13407fd4f0eb..eab6726953b4 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -401,10 +401,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file, char buf[32], *trimmed; int ret, dir, val = 0; - ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (count >= sizeof(buf)) + return -EINVAL; + + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret < 0) return ret; + buf[ret] = '\0'; + trimmed = strim(buf); if (strcmp(trimmed, "input") == 0) { @@ -623,12 +628,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file, char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2]; int ret; + if (count >= sizeof(buf)) + return -EINVAL; + ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos, user_buf, count); if (ret < 0) return ret; - buf[strlen(buf) - 1] = '\0'; + buf[ret] = '\0'; ret = gpiod_set_consumer_name(data->ad.desc, buf); if (ret) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cd4fecbb41f2..113c5d90f2df 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -742,6 +742,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask); bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { + /* + * hog pins are requested before registering GPIO chip + */ + if (!gc->gpiodev) + return true; + /* No mask means all valid */ if (likely(!gc->gpiodev->valid_mask)) return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 
cfdf558b48b6..02138aa55793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; int r; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index e74e26b6a4f2..fec9a007533a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -752,6 +752,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; + /* The mall_size is already calculated as mall_size_per_umc * num_umc. + * However, for gfx1151, which features a 2-to-1 UMC mapping, + * the result must be multiplied by 2 to determine the actual mall size. + */ + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(11, 5, 1): + adev->gmc.mall_size *= 2; + break; + default: + break; + } + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index a1171e6152ed..f11df9c2ec13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1023,6 +1023,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | VCN_RB1_DB_CTRL__EN_MASK); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL); + return 0; } @@ -1205,6 +1209,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst) WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp); fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + /* Keeping one read-back to ensure all register writes are done, otherwise + * it may introduce race conditions */ + RREG32_SOC15(VCN, i, regVCN_RB_ENABLE); + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 64df8ca448b3..cc01b9c68b47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -372,6 +372,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) @@ -12763,7 +12765,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( /* The reply is stored in the top nibble of the command. 
*/ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - if (!payload->write && p_notify->aux_reply.length) + /*write req may receive a byte indicating partially written number as well*/ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 074b79fd5822..5cdbc86ef8f5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, */ if (payload.write && result >= 0) { if (result) { - /*one byte indicating partially written bytes. Force 0 to retry*/ - drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); - result = 0; + /*one byte indicating partially written bytes*/ + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + result = payload.data[0]; } else if (!payload.reply[0]) /*I2C_ACK|AUX_ACK*/ result = msg->size; @@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } - drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); } if (payload.reply[0]) - drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", payload.reply[0]); return result; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 28d1353f403d..ba4ce8a63158 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -439,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates. 
*/ - if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->ctx->dce_version > DCE_VERSION_MAX) { + if (dc->optimized_required || dc->wm_optimized_required) { + stream->adjust.timing_adjust_pending = true; return false; + } + } dc_exit_ips_for_hw_access(dc); @@ -3168,7 +3171,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->crtc_timing_adjust) { if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || - stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max) + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || + stream->adjust.timing_adjust_pending) update->crtc_timing_adjust->timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; update->crtc_timing_adjust->timing_adjust_pending = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index d9159ca55412..92f0a099d089 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 1, + .do_urgent_latency_adjustment = 0, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, }; void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 0c8ec30ea672..731fbd4bc600 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -910,7 +910,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm } //TODO : Could be possibly moved to a common helper layer. 
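The dc_stream_adjust_vmin_vmax and copy_stream_update_to_stream hunks above defer a DRR timing change behind a timing_adjust_pending flag while bandwidth optimizations are outstanding, then replay it on the next full stream update. A minimal standalone C sketch of that defer-and-replay pattern (illustrative names only, not the DC driver's API):

/*
 * Sketch of the defer-and-replay pattern from the dc.c hunks above:
 * if an adjustment cannot be applied yet, latch a "pending" bit and
 * let the next commit path pick it up. Names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct timing_adjust {
	int v_total_min;
	int v_total_max;
	bool pending;			/* set when an adjust was deferred */
};

struct stream {
	struct timing_adjust adjust;
};

static bool try_adjust_vmin_vmax(struct stream *s,
				 const struct timing_adjust *req,
				 bool optimizations_pending)
{
	if (optimizations_pending) {
		/* Can't touch DRR now: remember that an adjust is owed. */
		s->adjust.pending = true;
		return false;
	}

	s->adjust = *req;
	s->adjust.pending = false;	/* applied, nothing left to replay */
	return true;
}

static void commit_timing_adjust(struct stream *s, struct timing_adjust *req)
{
	/* Re-apply if values changed or a previously deferred adjust is pending. */
	if (s->adjust.v_total_min != req->v_total_min ||
	    s->adjust.v_total_max != req->v_total_max ||
	    s->adjust.pending)
		req->pending = true;

	s->adjust = *req;		/* pending bit rides along into the stream */
	req->pending = false;
}

int main(void)
{
	struct stream s = { 0 };
	struct timing_adjust req = { .v_total_min = 2200, .v_total_max = 2600 };

	try_adjust_vmin_vmax(&s, &req, true);	/* deferred */
	commit_timing_adjust(&s, &req);		/* replayed on the next full update */
	printf("pending carried into stream: %d\n", s.adjust.pending);
	return 0;
}

The point mirrored from the patch is that a rejected adjustment is never silently dropped: the pending bit survives in the stream state until a commit path observes it and forces the timing update through.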
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id) +static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id) { int i, j; @@ -918,10 +918,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str return false; for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) { - if (context->stream_status[i].plane_states[j] == plane) { - *plane_id = (i << 16) | j; - return true; + if (context->streams[i]->stream_id == stream_id) { + for (j = 0; j < context->stream_status[i].plane_count; j++) { + if (context->stream_status[i].plane_states[j] == plane) { + *plane_id = (i << 16) | j; + return true; + } } } } @@ -944,14 +946,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d return location; } -static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, +static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context) { unsigned int plane_id; int i = 0; int location = -1; - if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) { + if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { ASSERT(false); return -1; } @@ -1037,7 +1039,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; } else { for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { - disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context); + disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context); if (disp_cfg_plane_location < 0) disp_cfg_plane_location = dml_dispcfg->num_planes++; @@ -1048,7 +1050,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; - if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) + if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true; /* apply forced pstate policy */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 1236e0f9a256..712aff7e17f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes( enum dc_cursor_color_format color_format = 
cursor_attributes->color_format; int cur_rom_en = 0; - // DCN4 should always do Cursor degamma for Cursor Color modes if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 5489f3d431f6..3af6a3402b89 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -1980,9 +1980,9 @@ void dcn401_program_pipe( dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); } - if (pipe_ctx->update_flags.raw || - (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) || - pipe_ctx->stream->update_flags.raw) + if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw || + pipe_ctx->plane_state->update_flags.raw || + pipe_ctx->stream->update_flags.raw)) dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context); if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable || diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 268626e73c54..53c961f86d43 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) void link_set_all_streams_dpms_off_for_link(struct dc_link *link) { struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_stream_state *streams[MAX_PIPES]; struct dc_state *state = link->dc->current_state; uint8_t count; int i; @@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link) link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + /* The subsequent call to dc_commit_updates_for_stream for a full update + * will release the current state and swap to a new state. Releasing the + * current state results in the stream pointers in the pipe_ctx structs + * to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream. 
+ */ + for (i = 0; i < count; i++) + streams[i] = pipes[i]->stream; + for (i = 0; i < count; i++) { - stream_update.stream = pipes[i]->stream; + stream_update.stream = streams[i]; dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, - pipes[i]->stream, &stream_update, + streams[i], &stream_update, state); } diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index de424e670995..4b2f32889f00 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, lockdep_assert_held(&gpusvm->notifier_lock); if (range->flags.has_dma_mapping) { + struct drm_gpusvm_range_flags flags = { + .__flags = range->flags.__flags, + }; + for (i = 0, j = 0; i < npages; j++) { struct drm_pagemap_device_addr *addr = &range->dma_addr[j]; @@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm, dev, *addr); i += 1 << addr->order; } - range->flags.has_devmem_pages = false; - range->flags.has_dma_mapping = false; + + /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ + flags.has_devmem_pages = false; + flags.has_dma_mapping = false; + WRITE_ONCE(range->flags.__flags, flags.__flags); + range->dpagemap = NULL; } } @@ -1334,6 +1342,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm, int err = 0; struct dev_pagemap *pagemap; struct drm_pagemap *dpagemap; + struct drm_gpusvm_range_flags flags; retry: hmm_range.notifier_seq = mmu_interval_read_begin(notifier); @@ -1378,7 +1387,8 @@ map_pages: */ drm_gpusvm_notifier_lock(gpusvm); - if (range->flags.unmapped) { + flags.__flags = range->flags.__flags; + if (flags.unmapped) { drm_gpusvm_notifier_unlock(gpusvm); err = -EFAULT; goto err_free; @@ -1454,6 +1464,11 @@ map_pages: goto err_unmap; } + if (ctx->devmem_only) { + err = -EFAULT; + goto err_unmap; + } + addr = dma_map_page(gpusvm->drm->dev, page, 0, PAGE_SIZE << order, @@ -1469,14 +1484,17 @@ map_pages: } i += 1 << order; num_dma_mapped = i; - range->flags.has_dma_mapping = true; + flags.has_dma_mapping = true; } if (zdd) { - range->flags.has_devmem_pages = true; + flags.has_devmem_pages = true; range->dpagemap = dpagemap; } + /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */ + WRITE_ONCE(range->flags.__flags, flags.__flags); + drm_gpusvm_notifier_unlock(gpusvm); kvfree(pfns); set_seqno: @@ -1765,6 +1783,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm, goto err_finalize; /* Upon success bind devmem allocation to range and zdd */ + devmem_allocation->timeslice_expiration = get_jiffies_64() + + msecs_to_jiffies(ctx->timeslice_ms); zdd->devmem_allocation = devmem_allocation; /* Owns ref */ err_finalize: @@ -1985,6 +2005,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas, void *buf; int i, err = 0; + if (page) { + zdd = page->zone_device_data; + if (time_before64(get_jiffies_64(), + zdd->devmem_allocation->timeslice_expiration)) + return 0; + } + start = ALIGN_DOWN(fault_addr, size); end = ALIGN(fault_addr + 1, size); diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 7752d8ac85f0..c08fa93e50a3 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -75,7 +75,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, unsigned long long venc_freq; unsigned long long hdmi_freq; - vclk_freq = mode->clock * 1000; + vclk_freq = mode->clock * 1000ULL; /* For 420, pixel 
clock is half unlike venc clock */ if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) @@ -123,7 +123,7 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); struct meson_drm *priv = encoder_hdmi->priv; bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned long long clock = mode->clock * 1000; + unsigned long long clock = mode->clock * 1000ULL; unsigned long long phy_freq; unsigned long long vclk_freq; unsigned long long venc_freq; diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index 0460ecaef4bd..23914a9f7fd3 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, drm); - drm_client_setup(drm, NULL); + if (bpp == 16) + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565); + else + drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888); return 0; } diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 167fb0f742de..5a47991b4b81 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -47,6 +47,10 @@ #define MI_LRI_FORCE_POSTED REG_BIT(12) #define MI_LRI_LEN(x) (((x) & 0xff) + 1) +#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4)) +#define MI_SRM_USE_GGTT REG_BIT(22) +#define MI_SRM_ADD_CS_OFFSET REG_BIT(19) + #define MI_FLUSH_DW __MI_INSTR(0x26) #define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22) #define MI_FLUSH_DW_STORE_INDEX REG_BIT(21) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index fb8ec317b6ee..891f928d80ce 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -43,6 +43,10 @@ #define XEHPC_BCS8_RING_BASE 0x3ee000 #define GSCCS_RING_BASE 0x11a000 +#define ENGINE_ID(base) XE_REG((base) + 0x8c) +#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4) +#define ENGINE_CLASS_ID REG_GENMASK(2, 0) + #define RING_TAIL(base) XE_REG((base) + 0x30) #define TAIL_ADDR REG_GENMASK(20, 3) @@ -154,6 +158,7 @@ #define STOP_RING REG_BIT(8) #define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8) +#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac) #define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc) #define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4) diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index da1f198ac107..181913967ac9 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -157,6 +157,7 @@ #define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108) #define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED) +#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12) #define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6) #define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED) diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 57944f90bbf6..994af591a2e8 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -11,7 +11,9 @@ #define CTX_RING_TAIL (0x06 + 1) #define CTX_RING_START (0x08 + 1) #define CTX_RING_CTL (0x0a + 1) +#define CTX_BB_PER_CTX_PTR (0x12 + 1) #define CTX_TIMESTAMP (0x22 + 1) +#define CTX_TIMESTAMP_UDW (0x24 + 1) #define 
CTX_INDIRECT_RING_STATE (0x26 + 1) #define CTX_PDP0_UDW (0x30 + 1) #define CTX_PDP0_LDW (0x32 + 1) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 9f8667ebba85..0482f26aa480 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -330,6 +330,8 @@ struct xe_device { u8 has_sriov:1; /** @info.has_usm: Device has unified shared memory support */ u8 has_usm:1; + /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */ + u8 has_64bit_timestamp:1; /** @info.is_dgfx: is discrete device */ u8 is_dgfx:1; /** diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 606922d9dd73..cd9b1c32f30f 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -830,7 +830,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) { struct xe_device *xe = gt_to_xe(q->gt); struct xe_lrc *lrc; - u32 old_ts, new_ts; + u64 old_ts, new_ts; int idx; /* diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 31bc2022bfc2..769781d577df 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -941,7 +941,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) return xe_sched_invalidate_job(job, 2); } - ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); + ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0])); ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); /* diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index df3ceddede07..03bfba696b37 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -24,6 +24,7 @@ #include "xe_hw_fence.h" #include "xe_map.h" #include "xe_memirq.h" +#include "xe_mmio.h" #include "xe_sriov.h" #include "xe_trace_lrc.h" #include "xe_vm.h" @@ -650,6 +651,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc) #define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8) #define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8) #define LRC_PARALLEL_PPHWSP_OFFSET 2048 +#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096 #define LRC_PPHWSP_SIZE SZ_4K u32 xe_lrc_regs_offset(struct xe_lrc *lrc) @@ -684,7 +686,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc) static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc) { - /* The start seqno is stored in the driver-defined portion of PPHWSP */ + /* This is stored in the driver-defined portion of PPHWSP */ return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET; } @@ -694,11 +696,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc) return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET; } +static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc) +{ + return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET; +} + static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc) { return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32); } +static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc) +{ + return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32); +} + static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc) { /* Indirect ring state page is at the very end of LRC */ @@ -726,8 +738,10 @@ DECL_MAP_ADDR_HELPERS(regs) DECL_MAP_ADDR_HELPERS(start_seqno) DECL_MAP_ADDR_HELPERS(ctx_job_timestamp) DECL_MAP_ADDR_HELPERS(ctx_timestamp) +DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw) DECL_MAP_ADDR_HELPERS(parallel) 
DECL_MAP_ADDR_HELPERS(indirect_ring) +DECL_MAP_ADDR_HELPERS(engine_id) #undef DECL_MAP_ADDR_HELPERS @@ -743,18 +757,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc) } /** + * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address + * @lrc: Pointer to the lrc. + * + * Returns: ctx timestamp udw GGTT address + */ +u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc) +{ + return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc); +} + +/** * xe_lrc_ctx_timestamp() - Read ctx timestamp value * @lrc: Pointer to the lrc. * * Returns: ctx timestamp value */ -u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc) +u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc) { struct xe_device *xe = lrc_to_xe(lrc); struct iosys_map map; + u32 ldw, udw = 0; map = __xe_lrc_ctx_timestamp_map(lrc); - return xe_map_read32(xe, &map); + ldw = xe_map_read32(xe, &map); + + if (xe->info.has_64bit_timestamp) { + map = __xe_lrc_ctx_timestamp_udw_map(lrc); + udw = xe_map_read32(xe, &map); + } + + return (u64)udw << 32 | ldw; } /** @@ -864,7 +897,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe) static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm) { - u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile); + u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc)); xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc)); @@ -877,6 +910,65 @@ static void xe_lrc_finish(struct xe_lrc *lrc) xe_bo_unpin(lrc->bo); xe_bo_unlock(lrc->bo); xe_bo_put(lrc->bo); + xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo); +} + +/* + * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active + * context run ticks. + * @lrc: Pointer to the lrc. + * + * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the + * context, but only gets updated when the context switches out. In order to + * check how long a context has been active before it switches out, two things + * are required: + * + * (1) Determine if the context is running: + * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in + * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is + * initialized. During a query, we just check for this value to determine if the + * context is active. If the context switched out, it would overwrite this + * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as + * the last part of context restore, so reusing this LRC location will not + * clobber anything. + * + * (2) Calculate the time that the context has been active for: + * The CTX_TIMESTAMP ticks only when the context is active. If a context is + * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization. + * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific + * engine instance. Since we do not know which instance the context is running + * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and + * store it in the PPHSWP. 
+ */ +#define CONTEXT_ACTIVE 1ULL +static void xe_lrc_setup_utilization(struct xe_lrc *lrc) +{ + u32 *cmd; + + cmd = lrc->bb_per_ctx_bo->vmap.vaddr; + + *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET; + *cmd++ = ENGINE_ID(0).addr; + *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc); + *cmd++ = 0; + + *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc); + *cmd++ = 0; + *cmd++ = lower_32_bits(CONTEXT_ACTIVE); + + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) { + *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc); + *cmd++ = 0; + *cmd++ = upper_32_bits(CONTEXT_ACTIVE); + } + + *cmd++ = MI_BATCH_BUFFER_END; + + xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, + xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1); + } #define PVC_CTX_ASID (0x2e + 1) @@ -893,31 +985,40 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, void *init_data = NULL; u32 arb_enable; u32 lrc_size; + u32 bo_flags; int err; kref_init(&lrc->refcount); + lrc->gt = gt; lrc->flags = 0; lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class); if (xe_gt_has_indirect_ring_state(gt)) lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE; + bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT | + XE_BO_FLAG_GGTT_INVALIDATE; + /* * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address * via VM bind calls. */ lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size, ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); + bo_flags); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); + lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, + ttm_bo_type_kernel, + bo_flags); + if (IS_ERR(lrc->bb_per_ctx_bo)) { + err = PTR_ERR(lrc->bb_per_ctx_bo); + goto err_lrc_finish; + } + lrc->size = lrc_size; - lrc->tile = gt_to_tile(hwe->gt); lrc->ring.size = ring_size; lrc->ring.tail = 0; - lrc->ctx_timestamp = 0; xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt, hwe->fence_irq, hwe->name); @@ -990,7 +1091,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) | _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE)); + lrc->ctx_timestamp = 0; xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0); + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) + xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0); if (xe->info.has_asid && vm) xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid); @@ -1019,6 +1123,8 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, map = __xe_lrc_start_seqno_map(lrc); xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1); + xe_lrc_setup_utilization(lrc); + return 0; err_lrc_finish: @@ -1238,6 +1344,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc) return __xe_lrc_parallel_map(lrc); } +/** + * xe_lrc_engine_id() - Read engine id value + * @lrc: Pointer to the lrc. 
+ * + * Returns: context id value + */ +static u32 xe_lrc_engine_id(struct xe_lrc *lrc) +{ + struct xe_device *xe = lrc_to_xe(lrc); + struct iosys_map map; + + map = __xe_lrc_engine_id_map(lrc); + return xe_map_read32(xe, &map); +} + static int instr_dw(u32 cmd_header) { /* GFXPIPE "SINGLE_DW" opcodes are a single dword */ @@ -1684,7 +1805,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc); snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset; snapshot->lrc_snapshot = NULL; - snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc); + snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc)); snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc); return snapshot; } @@ -1784,22 +1905,74 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot) kfree(snapshot); } +static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts) +{ + u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id); + u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id); + struct xe_hw_engine *hwe; + u64 val; + + hwe = xe_gt_hw_engine(lrc->gt, class, instance, false); + if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe), + "Unexpected engine class:instance %d:%d for context utilization\n", + class, instance)) + return -1; + + if (lrc_to_xe(lrc)->info.has_64bit_timestamp) + val = xe_mmio_read64_2x32(&hwe->gt->mmio, + RING_CTX_TIMESTAMP(hwe->mmio_base)); + else + val = xe_mmio_read32(&hwe->gt->mmio, + RING_CTX_TIMESTAMP(hwe->mmio_base)); + + *reg_ctx_ts = val; + + return 0; +} + /** * xe_lrc_update_timestamp() - Update ctx timestamp * @lrc: Pointer to the lrc. * @old_ts: Old timestamp value * * Populate @old_ts current saved ctx timestamp, read new ctx timestamp and - * update saved value. + * update saved value. With support for active contexts, the calculation may be + * slightly racy, so follow a read-again logic to ensure that the context is + * still active before returning the right timestamp. * * Returns: New ctx timestamp value */ -u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts) +u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts) { + u64 lrc_ts, reg_ts; + u32 engine_id; + *old_ts = lrc->ctx_timestamp; - lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc); + lrc_ts = xe_lrc_ctx_timestamp(lrc); + /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */ + if (IS_SRIOV_VF(lrc_to_xe(lrc))) { + lrc->ctx_timestamp = lrc_ts; + goto done; + } + + if (lrc_ts == CONTEXT_ACTIVE) { + engine_id = xe_lrc_engine_id(lrc); + if (!get_ctx_timestamp(lrc, engine_id, ®_ts)) + lrc->ctx_timestamp = reg_ts; + + /* read lrc again to ensure context is still active */ + lrc_ts = xe_lrc_ctx_timestamp(lrc); + } + + /* + * If context switched out, just use the lrc_ts. Note that this needs to + * be a separate if condition. 
+ */ + if (lrc_ts != CONTEXT_ACTIVE) + lrc->ctx_timestamp = lrc_ts; +done: trace_xe_lrc_update_timestamp(lrc, *old_ts); return lrc->ctx_timestamp; diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index 0b40f349ab95..eb6e8de8c939 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot); u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc); -u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc); +u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc); +u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc); u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc); u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc); @@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc); * * Returns the current LRC timestamp */ -u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts); +u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts); #endif diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index 71ecb453f811..ae24cf6f8dd9 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -25,8 +25,8 @@ struct xe_lrc { /** @size: size of lrc including any indirect ring state page */ u32 size; - /** @tile: tile which this LRC belongs to */ - struct xe_tile *tile; + /** @gt: gt which this LRC belongs to */ + struct xe_gt *gt; /** @flags: LRC flags */ #define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1 @@ -52,7 +52,10 @@ struct xe_lrc { struct xe_hw_fence_ctx fence_ctx; /** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */ - u32 ctx_timestamp; + u64 ctx_timestamp; + + /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */ + struct xe_bo *bb_per_ctx_bo; }; struct xe_lrc_snapshot; diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index 9f4632e39a1a..e861c694f336 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -29,9 +29,6 @@ struct xe_modparam xe_modparam = { module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600); MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2"); -module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444); -MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault"); - module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444); MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h index 84339e509c80..5a3bfea8b7b4 100644 --- a/drivers/gpu/drm/xe/xe_module.h +++ b/drivers/gpu/drm/xe/xe_module.h @@ -12,7 +12,6 @@ struct xe_modparam { bool force_execlist; bool probe_display; - bool always_migrate_to_vram; u32 force_vram_bar_size; int guc_log_level; char *guc_firmware_path; diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 818f023166d5..f4d108dc49b1 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -140,6 +140,7 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ + .has_64bit_timestamp = 1, \ .va_bits = 48, \ .vm_max_level = 4, \ .hw_engine_mask = \ @@ -668,6 +669,7 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_range_tlb_invalidation = 
graphics_desc->has_range_tlb_invalidation; xe->info.has_usm = graphics_desc->has_usm; + xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp; for_each_remote_tile(tile, xe, id) { int err; diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index e9b9bbc138d3..ca6b10d35573 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -21,6 +21,7 @@ struct xe_graphics_desc { u8 has_indirect_ring_state:1; u8 has_range_tlb_invalidation:1; u8 has_usm:1; + u8 has_64bit_timestamp:1; }; struct xe_media_desc { diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ffaf0d02dc7d..856038553b81 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2232,11 +2232,19 @@ static void op_commit(struct xe_vm *vm, } case DRM_GPUVA_OP_DRIVER: { + /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */ + if (op->subop == XE_VMA_SUBOP_MAP_RANGE) { - op->map_range.range->tile_present |= BIT(tile->id); - op->map_range.range->tile_invalidated &= ~BIT(tile->id); + WRITE_ONCE(op->map_range.range->tile_present, + op->map_range.range->tile_present | + BIT(tile->id)); + WRITE_ONCE(op->map_range.range->tile_invalidated, + op->map_range.range->tile_invalidated & + ~BIT(tile->id)); } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) { - op->unmap_range.range->tile_present &= ~BIT(tile->id); + WRITE_ONCE(op->unmap_range.range->tile_present, + op->unmap_range.range->tile_present & + ~BIT(tile->id)); } break; } diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index a7582b097ae6..bc1689db4cd7 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -234,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job) static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i) { - dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT | - MI_COPY_MEM_MEM_DST_GGTT; + dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET; + dw[i++] = RING_CTX_TIMESTAMP(0).addr; dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc); dw[i++] = 0; - dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc); - dw[i++] = 0; - dw[i++] = MI_NOOP; return i; } diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 8184390f9c7b..86d47aaf0358 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -227,7 +227,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe) if (!shrinker) return ERR_PTR(-ENOMEM); - shrinker->shrink = shrinker_alloc(0, "xe system shrinker"); + shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique); if (!shrinker->shrink) { kfree(shrinker); return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 24c578e1170e..975094c1a582 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -15,8 +15,17 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range) { - /* Not reliable without notifier lock */ - return range->base.flags.has_devmem_pages; + /* + * Advisory only check whether the range is currently backed by VRAM + * memory. 
+ */ + + struct drm_gpusvm_range_flags flags = { + /* Pairs with WRITE_ONCE in drm_gpusvm.c */ + .__flags = READ_ONCE(range->base.flags.__flags), + }; + + return flags.has_devmem_pages; } static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range) @@ -645,9 +654,16 @@ void xe_svm_fini(struct xe_vm *vm) } static bool xe_svm_range_is_valid(struct xe_svm_range *range, - struct xe_tile *tile) + struct xe_tile *tile, + bool devmem_only) { - return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id); + /* + * Advisory only check whether the range currently has a valid mapping, + * READ_ONCE pairs with WRITE_ONCE in xe_pt.c + */ + return ((READ_ONCE(range->tile_present) & + ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) && + (!devmem_only || xe_svm_range_in_vram(range)); } static struct xe_vram_region *tile_to_vr(struct xe_tile *tile) @@ -712,6 +728,36 @@ unlock: return err; } +static bool supports_4K_migration(struct xe_device *xe) +{ + if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) + return false; + + return true; +} + +static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, + struct xe_vma *vma) +{ + struct xe_vm *vm = range_to_vm(&range->base); + u64 range_size = xe_svm_range_size(range); + + if (!range->base.flags.migrate_devmem) + return false; + + if (xe_svm_range_in_vram(range)) { + drm_dbg(&vm->xe->drm, "Range is already in VRAM\n"); + return false; + } + + if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) { + drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); + return false; + } + + return true; +} + /** * xe_svm_handle_pagefault() - SVM handle page fault * @vm: The VM. @@ -735,11 +781,16 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), .check_pages_threshold = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0, + .devmem_only = atomic && IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), + .timeslice_ms = atomic && IS_DGFX(vm->xe) && + IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0, }; struct xe_svm_range *range; struct drm_gpusvm_range *r; struct drm_exec exec; struct dma_fence *fence; + int migrate_try_count = ctx.devmem_only ? 
3 : 1; ktime_t end = 0; int err; @@ -758,24 +809,31 @@ retry: if (IS_ERR(r)) return PTR_ERR(r); + if (ctx.devmem_only && !r->flags.migrate_devmem) + return -EACCES; + range = to_xe_range(r); - if (xe_svm_range_is_valid(range, tile)) + if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) return 0; range_debug(range, "PAGE FAULT"); - /* XXX: Add migration policy, for now migrate range once */ - if (!range->skip_migrate && range->base.flags.migrate_devmem && - xe_svm_range_size(range) >= SZ_64K) { - range->skip_migrate = true; - + if (--migrate_try_count >= 0 && + xe_svm_range_needs_migrate_to_vram(range, vma)) { err = xe_svm_alloc_vram(vm, tile, range, &ctx); + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (err) { - drm_dbg(&vm->xe->drm, - "VRAM allocation failed, falling back to " - "retrying fault, asid=%u, errno=%pe\n", - vm->usm.asid, ERR_PTR(err)); - goto retry; + if (migrate_try_count || !ctx.devmem_only) { + drm_dbg(&vm->xe->drm, + "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n", + vm->usm.asid, ERR_PTR(err)); + goto retry; + } else { + drm_err(&vm->xe->drm, + "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n", + vm->usm.asid, ERR_PTR(err)); + return err; + } } } @@ -783,15 +841,23 @@ retry: err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx); /* Corner where CPU mappings have changed */ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { - if (err == -EOPNOTSUPP) { - range_debug(range, "PAGE FAULT - EVICT PAGES"); - drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ + if (migrate_try_count > 0 || !ctx.devmem_only) { + if (err == -EOPNOTSUPP) { + range_debug(range, "PAGE FAULT - EVICT PAGES"); + drm_gpusvm_range_evict(&vm->svm.gpusvm, + &range->base); + } + drm_dbg(&vm->xe->drm, + "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); + range_debug(range, "PAGE FAULT - RETRY PAGES"); + goto retry; + } else { + drm_err(&vm->xe->drm, + "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n", + vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); } - drm_dbg(&vm->xe->drm, - "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n", - vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); - range_debug(range, "PAGE FAULT - RETRY PAGES"); - goto retry; } if (err) { range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT"); @@ -815,6 +881,7 @@ retry_bind: drm_exec_fini(&exec); err = PTR_ERR(fence); if (err == -EAGAIN) { + ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ range_debug(range, "PAGE FAULT - RETRY BIND"); goto retry; } @@ -825,9 +892,6 @@ retry_bind: } drm_exec_fini(&exec); - if (xe_modparam.always_migrate_to_vram) - range->skip_migrate = false; - dma_fence_wait(fence, false); dma_fence_put(fence); diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index be306fe7aaa4..fe58ac2f4baa 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -36,11 +36,6 @@ struct xe_svm_range { * range. Protected by GPU SVM notifier lock. */ u8 tile_invalidated; - /** - * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler - * locking. 
- */ - u8 skip_migrate :1; }; #if IS_ENABLED(CONFIG_DRM_GPUSVM) diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h index 5c669a0b2180..d525cbee1e34 100644 --- a/drivers/gpu/drm/xe/xe_trace_lrc.h +++ b/drivers/gpu/drm/xe/xe_trace_lrc.h @@ -19,12 +19,12 @@ #define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev) TRACE_EVENT(xe_lrc_update_timestamp, - TP_PROTO(struct xe_lrc *lrc, uint32_t old), + TP_PROTO(struct xe_lrc *lrc, uint64_t old), TP_ARGS(lrc, old), TP_STRUCT__entry( __field(struct xe_lrc *, lrc) - __field(u32, old) - __field(u32, new) + __field(u64, old) + __field(u64, new) __string(name, lrc->fence_ctx.name) __string(device_id, __dev_name_lrc(lrc)) ), @@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp, __assign_str(name); __assign_str(device_id); ), - TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s", + TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s", __entry->lrc, __get_str(name), __entry->old, __entry->new, __get_str(device_id)) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 24f644c0a673..2f833f0d575f 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -815,6 +815,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) }, + { XE_RTP_NAME("22021007897"), + XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE)) + }, /* Xe3_LPG */ { XE_RTP_NAME("14021490052"), diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c index 25f0ebfcbd5f..0a9b44ce4904 100644 --- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c +++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c @@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata) case ALS_IDX: privdata->dev_en.is_als_present = false; break; + case SRA_IDX: + privdata->dev_en.is_sra_present = false; + break; } if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { @@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) for (i = 0; i < cl_data->num_hid_devices; i++) { cl_data->sensor_sts[i] = SENSOR_DISABLED; - if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX) - break; - if (cl_data->sensor_idx[i] == SRA_IDX) { info.sensor_idx = cl_data->sensor_idx[i]; writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0)); @@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR); cl_data->sensor_sts[i] = (status == 0) ? 
SENSOR_ENABLED : SENSOR_DISABLED; - if (cl_data->sensor_sts[i] == SENSOR_ENABLED) + if (cl_data->sensor_sts[i] == SENSOR_ENABLED) { + cl_data->is_any_sensor_enabled = true; privdata->dev_en.is_sra_present = true; + } continue; } @@ -238,6 +240,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata) cleanup: amd_sfh_hid_client_deinit(privdata); for (i = 0; i < cl_data->num_hid_devices; i++) { + if (cl_data->sensor_idx[i] == SRA_IDX) + continue; devm_kfree(dev, cl_data->feature_report[i]); devm_kfree(dev, in_data->input_report[i]); devm_kfree(dev, cl_data->report_descr[i]); diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 2e96ec6a3073..9a06f9b0e4ef 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type struct hid_bpf_ops *e; int ret; + if (unlikely(hdev->bpf.destroyed)) + return ERR_PTR(-ENODEV); + if (type >= HID_REPORT_TYPES) return ERR_PTR(-EINVAL); @@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev, struct hid_bpf_ops *e; int ret, idx; + if (unlikely(hdev->bpf.destroyed)) + return -ENODEV; + if (rtype >= HID_REPORT_TYPES) return -EINVAL; @@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev, struct hid_bpf_ops *e; int ret, idx; + if (unlikely(hdev->bpf.destroyed)) + return -ENODEV; + idx = srcu_read_lock(&hdev->bpf.srcu); list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list, srcu_read_lock_held(&hdev->bpf.srcu)) { diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c index 1a0aeea6a081..a754710fc90b 100644 --- a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c +++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c @@ -157,6 +157,7 @@ static const __u8 fixed_rdesc_vendor[] = { ReportCount(5) // padding Input(Const) // Byte 4 in report - just exists so we get to be a tablet pad + UsagePage_Digitizers Usage_Dig_BarrelSwitch // BTN_STYLUS ReportCount(1) ReportSize(1) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 288a2b864cc4..1062731315a2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -41,6 +41,10 @@ #define USB_VENDOR_ID_ACTIONSTAR 0x2101 #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 +#define USB_VENDOR_ID_ADATA_XPG 0x125f +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505 +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506 + #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 646171598e41..0731473cc9b1 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -27,6 +27,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL }, { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS }, diff --git 
a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index dfd9d22ed559..949d307c66a8 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -1150,11 +1150,9 @@ static void steam_client_ll_close(struct hid_device *hdev) struct steam_device *steam = hdev->driver_data; unsigned long flags; - bool connected; spin_lock_irqsave(&steam->lock, flags); steam->client_opened--; - connected = steam->connected && !steam->client_opened; spin_unlock_irqrestore(&steam->lock, flags); schedule_work(&steam->unregister_work); diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 3b81468a1df2..0bf70664c35e 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev) u8 ep_addr[2] = {b_ep, 0}; if (!usb_check_int_endpoints(usbif, ep_addr)) { + kfree(send_buf); hid_err(hdev, "Unexpected non-int endpoint\n"); return; } diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index a367df6ea01f..61a4019ddc74 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev, suffix = "System Control"; break; } - } - - if (suffix) + } else { hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); + if (!hi->input->name) + return -ENOMEM; + } return 0; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 1556d4287fa5..eaf099b2efdb 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -70,10 +70,16 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, { while (!kfifo_is_empty(fifo)) { int size = kfifo_peek_len(fifo); - u8 *buf = kzalloc(size, GFP_KERNEL); + u8 *buf; unsigned int count; int err; + buf = kzalloc(size, GFP_KERNEL); + if (!buf) { + kfifo_skip(fifo); + continue; + } + count = kfifo_out(fifo, buf, size); if (count != size) { // Hard to say what is the "right" action in this @@ -81,6 +87,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev, // to flush seems reasonable enough, however. 
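The wacom queue-flush fix above makes every early exit either skip the fifo entry or free the temporary buffer, so an allocation failure or a short read no longer leaks memory or wedges the drain loop. A small userspace model of that error-path discipline, with a plain array standing in for the kfifo and purely illustrative names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry { size_t len; const char *data; };

    static void flush_queue(const struct entry *q, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            char *buf = calloc(1, q[i].len);

            if (!buf)
                continue;                    /* skip this entry, keep draining */

            size_t copied = strlen(q[i].data);
            if (copied != q[i].len) {
                fprintf(stderr, "entry %zu: unexpected size\n", i);
                free(buf);                   /* was leaked before the fix */
                continue;
            }
            memcpy(buf, q[i].data, copied);
            printf("processed %zu bytes\n", copied);
            free(buf);
        }
    }

    int main(void)
    {
        const struct entry q[] = { { 5, "hello" }, { 9, "short" } };

        flush_queue(q, 2);
        return 0;
    }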
hid_warn(hdev, "%s: removed fifo entry with unexpected size\n", __func__); + kfree(buf); continue; } err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false); @@ -2361,6 +2368,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) unsigned int connect_mask = HID_CONNECT_HIDRAW; features->pktlen = wacom_compute_pktlen(hdev); + if (!features->pktlen) + return -ENODEV; if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL)) return -ENOMEM; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 8e0267c7cc29..f21f9877c040 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); - if (IS_ERR(dev->slave)) + if (IS_ERR(dev->slave)) { + i2c_del_adapter(&dev->adapter); return dev_err_probe(device, PTR_ERR(dev->slave), "register UCSI failed\n"); + } } pm_runtime_set_autosuspend_delay(device, 1000); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b4e3e4beb7f4..d4263385850a 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device) down_read(&devices_rwsem); + /* Mark for userspace that device is ready */ + kobject_uevent(&device->dev.kobj, KOBJ_ADD); + ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT); if (ret) goto out; @@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name, return ret; } dev_set_uevent_suppress(&device->dev, false); - /* Mark for userspace that device is ready */ - kobject_uevent(&device->dev.kobj, KOBJ_ADD); ib_device_notify_register(device); + ib_device_put(device); return 0; diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index abb532bc8ce4..1e840bbd619d 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -221,7 +221,7 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_d break; if (i < IRDMA_MIN_MSIX) { - for (; i > 0; i--) + while (--i >= 0) ice_free_rdma_qvector(cdev, &rf->msix_entries[i]); kfree(rf->msix_entries); @@ -256,6 +256,8 @@ static void irdma_remove(struct auxiliary_device *aux_dev) irdma_ib_unregister_device(iwdev); irdma_deinit_interrupts(iwdev->rf, cdev_info); + kfree(iwdev->rf); + pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn)); } diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index eeb932e58730..1e8c92826de2 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -4871,5 +4871,4 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev) irdma_rt_deinit_hw(iwdev); irdma_ctrl_deinit_hw(iwdev->rf); - kfree(iwdev->rf); } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index fec87c9030ab..fffd144d509e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, udata, cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); - if (err) { - vfree(cq->queue->buf); - kfree(cq->queue); + if (err) return err; - } cq->is_user = uresp; diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig index ae4a953e2a03..186f182b80e7 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -3,6 +3,7 @@ config RDMA_SIW depends on INET && INFINIBAND depends on INFINIBAND_VIRT_DMA select CRC32 + select NET_CRC32C help This driver implements the iWARP RDMA transport over the Linux TCP/IP network stack. It enables a system with a diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 385067e07faf..d9e5a2e4c471 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -693,29 +693,9 @@ static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4]) return siw_crc_final(&crc, out); } -static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum) -{ - return (__force __wsum)crc32c((__force __u32)sum, buff, len); -} - -static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset, - int len) -{ - return (__force __wsum)crc32c_combine((__force __u32)csum, - (__force __u32)csum2, len); -} - static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len) { - const struct skb_checksum_ops siw_cs_ops = { - .update = siw_csum_update, - .combine = siw_csum_combine, - }; - __wsum crc = (__force __wsum)srx->mpa_crc; - - crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc, - &siw_cs_ops); - srx->mpa_crc = (__force u32)crc; + srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc); } #define siw_dbg(ibdev, fmt, ...) \ diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index abe0522b7df4..91f866e3fb8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -329,14 +329,6 @@ struct ipoib_dev_priv { unsigned long flags; - /* - * This protects access to the child_intfs list. - * To READ from child_intfs the RTNL or vlan_rwsem read side must be - * held. To WRITE RTNL and the vlan_rwsem write side must be held (in - * that order) This lock exists because we have a few contexts where - * we need the child_intfs, but do not want to grab the RTNL. - */ - struct rw_semaphore vlan_rwsem; struct mutex mcast_mutex; struct rb_root path_tree; @@ -399,6 +391,9 @@ struct ipoib_dev_priv { struct ib_event_handler event_handler; struct net_device *parent; + /* 'child_intfs' and 'list' membership of all child devices are + * protected by the netdev instance lock of 'dev'. 
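The siw change above drops the open-coded skb_checksum ops in favour of skb_crc32c(), which folds each fragment into a running CRC32C. A self-contained model of why that incremental update matches hashing the whole payload in one pass (plain bitwise CRC32C; the kernel helper's exact seeding conventions may differ):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
    {
        const unsigned char *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    int main(void)
    {
        const char *msg = "MPA frame payload";
        size_t split = 7;

        uint32_t whole = crc32c_update(0, msg, strlen(msg));
        uint32_t part = crc32c_update(0, msg, split);

        part = crc32c_update(part, msg + split, strlen(msg) - split);
        printf("whole=0x%08x incremental=0x%08x match=%d\n",
               whole, part, whole == part);
        return 0;
    }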
+ */ struct list_head child_intfs; struct list_head list; int child_type; @@ -512,6 +507,8 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format, void ipoib_ib_dev_flush_light(struct work_struct *work); void ipoib_ib_dev_flush_normal(struct work_struct *work); void ipoib_ib_dev_flush_heavy(struct work_struct *work); +void ipoib_queue_work(struct ipoib_dev_priv *priv, + enum ipoib_flush_level level); void ipoib_ib_tx_timeout_work(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 5cde275daa94..10b0dbda6cd5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -40,6 +40,7 @@ #include <linux/ip.h> #include <linux/tcp.h> +#include <net/netdev_lock.h> #include <rdma/ib_cache.h> #include "ipoib.h" @@ -781,16 +782,20 @@ static void ipoib_napi_enable(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - napi_enable(&priv->recv_napi); - napi_enable(&priv->send_napi); + netdev_lock_ops_to_full(dev); + napi_enable_locked(&priv->recv_napi); + napi_enable_locked(&priv->send_napi); + netdev_unlock_full_to_ops(dev); } static void ipoib_napi_disable(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - napi_disable(&priv->recv_napi); - napi_disable(&priv->send_napi); + netdev_lock_ops_to_full(dev); + napi_disable_locked(&priv->recv_napi); + napi_disable_locked(&priv->send_napi); + netdev_unlock_full_to_ops(dev); } int ipoib_ib_dev_stop_default(struct net_device *dev) @@ -1172,24 +1177,11 @@ out: } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, - enum ipoib_flush_level level, - int nesting) + enum ipoib_flush_level level) { - struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; - down_read_nested(&priv->vlan_rwsem, nesting); - - /* - * Flush any child interfaces too -- they might be up even if - * the parent is down. 
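The flush rework above stops recursing into child interfaces under a nested rwsem; instead the parent walks child_intfs under its own instance lock and queues one piece of work per device (the ipoib_queue_work() helper added further down). A rough userspace model with pthreads; the structure and names are illustrative, not the driver's API:

    #include <pthread.h>
    #include <stdio.h>

    struct dev {
        const char *name;
        pthread_mutex_t lock;          /* models the netdev instance lock */
        struct dev *children[4];
        int nr_children;
    };

    static void queue_flush(struct dev *d, int level)
    {
        /* stand-in for queue_work(workqueue, &priv->flush_*) */
        printf("queue flush level=%d for %s\n", level, d->name);
    }

    static void flush_all(struct dev *parent, int level)
    {
        pthread_mutex_lock(&parent->lock);
        for (int i = 0; i < parent->nr_children; i++)
            queue_flush(parent->children[i], level);
        pthread_mutex_unlock(&parent->lock);

        queue_flush(parent, level);
    }

    static struct dev child = { .name = "ib0.8001", .lock = PTHREAD_MUTEX_INITIALIZER };
    static struct dev parent = {
        .name = "ib0", .lock = PTHREAD_MUTEX_INITIALIZER,
        .children = { &child }, .nr_children = 1,
    };

    int main(void)
    {
        flush_all(&parent, 2);
        return 0;
    }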
- */ - list_for_each_entry(cpriv, &priv->child_intfs, list) - __ipoib_ib_dev_flush(cpriv, level, nesting + 1); - - up_read(&priv->vlan_rwsem); - if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && level != IPOIB_FLUSH_HEAVY) { /* Make sure the dev_addr is set even if not flushing */ @@ -1253,10 +1245,14 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { + netdev_lock_ops(dev); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev)) + result = ipoib_ib_dev_open(dev); + netdev_unlock_ops(dev); + + if (result) return; if (netif_queue_stopped(dev)) @@ -1280,7 +1276,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); } void ipoib_ib_dev_flush_normal(struct work_struct *work) @@ -1288,7 +1284,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) @@ -1297,10 +1293,35 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work) container_of(work, struct ipoib_dev_priv, flush_heavy); rtnl_lock(); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); rtnl_unlock(); } +void ipoib_queue_work(struct ipoib_dev_priv *priv, + enum ipoib_flush_level level) +{ + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + struct ipoib_dev_priv *cpriv; + + netdev_lock(priv->dev); + list_for_each_entry(cpriv, &priv->child_intfs, list) + ipoib_queue_work(cpriv, level); + netdev_unlock(priv->dev); + } + + switch (level) { + case IPOIB_FLUSH_LIGHT: + queue_work(ipoib_workqueue, &priv->flush_light); + break; + case IPOIB_FLUSH_NORMAL: + queue_work(ipoib_workqueue, &priv->flush_normal); + break; + case IPOIB_FLUSH_HEAVY: + queue_work(ipoib_workqueue, &priv->flush_heavy); + break; + } +} + void ipoib_ib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 3b463db8ce39..f2f5465f2a90 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -49,6 +49,7 @@ #include <linux/jhash.h> #include <net/arp.h> #include <net/addrconf.h> +#include <net/netdev_lock.h> #include <net/pkt_sched.h> #include <linux/inetdevice.h> #include <rdma/ib_cache.h> @@ -132,6 +133,52 @@ static int ipoib_netdev_event(struct notifier_block *this, } #endif +struct ipoib_ifupdown_work { + struct work_struct work; + struct net_device *dev; + netdevice_tracker dev_tracker; + bool up; +}; + +static void ipoib_ifupdown_task(struct work_struct *work) +{ + struct ipoib_ifupdown_work *pwork = + container_of(work, struct ipoib_ifupdown_work, work); + struct net_device *dev = pwork->dev; + unsigned int flags; + + rtnl_lock(); + flags = dev->flags; + if (pwork->up) + flags |= IFF_UP; + else + flags &= ~IFF_UP; + + if (dev->flags != flags) + dev_change_flags(dev, flags, NULL); + rtnl_unlock(); + netdev_put(dev, &pwork->dev_tracker); + kfree(pwork); +} + +static void ipoib_schedule_ifupdown_task(struct net_device *dev, bool up) +{ + struct ipoib_ifupdown_work *work; + + if 
((up && (dev->flags & IFF_UP)) || + (!up && !(dev->flags & IFF_UP))) + return; + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return; + work->dev = dev; + netdev_hold(dev, &work->dev_tracker, GFP_KERNEL); + work->up = up; + INIT_WORK(&work->work, ipoib_ifupdown_task); + queue_work(ipoib_workqueue, &work->work); +} + int ipoib_open(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -154,17 +201,10 @@ int ipoib_open(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring up any child interfaces too */ - down_read(&priv->vlan_rwsem); - list_for_each_entry(cpriv, &priv->child_intfs, list) { - int flags; - - flags = cpriv->dev->flags; - if (flags & IFF_UP) - continue; - - dev_change_flags(cpriv->dev, flags | IFF_UP, NULL); - } - up_read(&priv->vlan_rwsem); + netdev_lock_ops_to_full(dev); + list_for_each_entry(cpriv, &priv->child_intfs, list) + ipoib_schedule_ifupdown_task(cpriv->dev, true); + netdev_unlock_full_to_ops(dev); } else if (priv->parent) { struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); @@ -199,17 +239,10 @@ static int ipoib_stop(struct net_device *dev) struct ipoib_dev_priv *cpriv; /* Bring down any child interfaces too */ - down_read(&priv->vlan_rwsem); - list_for_each_entry(cpriv, &priv->child_intfs, list) { - int flags; - - flags = cpriv->dev->flags; - if (!(flags & IFF_UP)) - continue; - - dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL); - } - up_read(&priv->vlan_rwsem); + netdev_lock_ops_to_full(dev); + list_for_each_entry(cpriv, &priv->child_intfs, list) + ipoib_schedule_ifupdown_task(cpriv->dev, false); + netdev_unlock_full_to_ops(dev); } return 0; @@ -426,17 +459,20 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, } } + if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) + return matches; + /* Check child interfaces */ - down_read_nested(&priv->vlan_rwsem, nesting); + netdev_lock(priv->dev); list_for_each_entry(child_priv, &priv->child_intfs, list) { matches += ipoib_match_gid_pkey_addr(child_priv, gid, - pkey_index, addr, - nesting + 1, - found_net_dev); + pkey_index, addr, + nesting + 1, + found_net_dev); if (matches > 1) break; } - up_read(&priv->vlan_rwsem); + netdev_unlock(priv->dev); return matches; } @@ -531,9 +567,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); ipoib_warn(priv, "enabling connected mode " "will cause multicast packet drops\n"); + netdev_lock_ops(dev); netdev_update_features(dev); - dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); + netif_set_mtu(dev, ipoib_cm_max_mtu(dev)); netif_set_real_num_tx_queues(dev, 1); + netdev_unlock_ops(dev); rtnl_unlock(); priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; @@ -543,9 +581,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) if (!strcmp(buf, "datagram\n")) { clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); + netdev_lock_ops(dev); netdev_update_features(dev); - dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); + netif_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); netif_set_real_num_tx_queues(dev, dev->num_tx_queues); + netdev_unlock_ops(dev); rtnl_unlock(); ipoib_flush_paths(dev); return (!rtnl_trylock()) ? 
-EBUSY : 0; @@ -1212,6 +1252,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work) int err; rtnl_lock(); + netdev_lock_ops(priv->dev); if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) goto unlock; @@ -1226,6 +1267,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work) netif_tx_wake_all_queues(priv->dev); unlock: + netdev_unlock_ops(priv->dev); rtnl_unlock(); } @@ -1992,9 +2034,9 @@ static int ipoib_ndo_init(struct net_device *ndev) dev_hold(priv->parent); - down_write(&ppriv->vlan_rwsem); + netdev_lock(priv->parent); list_add_tail(&priv->list, &ppriv->child_intfs); - up_write(&ppriv->vlan_rwsem); + netdev_unlock(priv->parent); } return 0; @@ -2004,8 +2046,6 @@ static void ipoib_ndo_uninit(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); - ASSERT_RTNL(); - /* * ipoib_remove_one guarantees the children are removed before the * parent, and that is the only place where a parent can be removed. @@ -2015,9 +2055,9 @@ static void ipoib_ndo_uninit(struct net_device *dev) if (priv->parent) { struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent); - down_write(&ppriv->vlan_rwsem); + netdev_lock(ppriv->dev); list_del(&priv->list); - up_write(&ppriv->vlan_rwsem); + netdev_unlock(ppriv->dev); } ipoib_neigh_hash_uninit(dev); @@ -2167,7 +2207,6 @@ static void ipoib_build_priv(struct net_device *dev) priv->dev = dev; spin_lock_init(&priv->lock); - init_rwsem(&priv->vlan_rwsem); mutex_init(&priv->mcast_mutex); INIT_LIST_HEAD(&priv->path_list); @@ -2372,10 +2411,10 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) netif_addr_unlock_bh(netdev); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { - down_read(&priv->vlan_rwsem); + netdev_lock_ops_to_full(priv->dev); list_for_each_entry(child_priv, &priv->child_intfs, list) set_base_guid(child_priv, gid); - up_read(&priv->vlan_rwsem); + netdev_unlock_full_to_ops(priv->dev); } } @@ -2415,6 +2454,14 @@ static int ipoib_set_mac(struct net_device *dev, void *addr) set_base_guid(priv, (union ib_gid *)(ss->__data + 4)); + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { + struct ipoib_dev_priv *cpriv; + + netdev_lock_ops_to_full(dev); + list_for_each_entry(cpriv, &priv->child_intfs, list) + queue_work(ipoib_workqueue, &cpriv->flush_light); + netdev_unlock_full_to_ops(dev); + } queue_work(ipoib_workqueue, &priv->flush_light); return 0; @@ -2526,7 +2573,7 @@ static struct net_device *ipoib_add_port(const char *format, ib_register_event_handler(&priv->event_handler); /* call event handler to ensure pkey in sync */ - queue_work(ipoib_workqueue, &priv->flush_heavy); + ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY); ndev->rtnl_link_ops = ipoib_get_link_ops(); @@ -2624,9 +2671,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) rtnl_lock(); + netdev_lock(priv->dev); list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) unregister_netdevice_queue(cpriv->dev, &head); + netdev_unlock(priv->dev); unregister_netdevice_queue(priv->dev, &head); unregister_netdevice_many(&head); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 368e5d77416d..86983080d28b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -280,15 +280,15 @@ void ipoib_event(struct ib_event_handler *handler, dev_name(&record->device->dev), record->element.port_num); if (record->event == IB_EVENT_CLIENT_REREGISTER) { - queue_work(ipoib_workqueue, &priv->flush_light); + ipoib_queue_work(priv, 
IPOIB_FLUSH_LIGHT); } else if (record->event == IB_EVENT_PORT_ERR || record->event == IB_EVENT_PORT_ACTIVE || record->event == IB_EVENT_LID_CHANGE) { - queue_work(ipoib_workqueue, &priv->flush_normal); + ipoib_queue_work(priv, IPOIB_FLUSH_NORMAL); } else if (record->event == IB_EVENT_PKEY_CHANGE) { - queue_work(ipoib_workqueue, &priv->flush_heavy); + ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY); } else if (record->event == IB_EVENT_GID_CHANGE && !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { - queue_work(ipoib_workqueue, &priv->flush_light); + ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 562df2b3ef18..243e8f555eca 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -53,8 +53,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv) { struct ipoib_dev_priv *tpriv; - - ASSERT_RTNL(); + bool result = true; /* * Since the legacy sysfs interface uses pkey for deletion it cannot @@ -73,13 +72,17 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv, if (ppriv->pkey == priv->pkey) return false; + netdev_lock(ppriv->dev); list_for_each_entry(tpriv, &ppriv->child_intfs, list) { if (tpriv->pkey == priv->pkey && - tpriv->child_type == IPOIB_LEGACY_CHILD) - return false; + tpriv->child_type == IPOIB_LEGACY_CHILD) { + result = false; + break; + } } + netdev_unlock(ppriv->dev); - return true; + return result; } /* @@ -98,8 +101,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv, int result; struct rdma_netdev *rn = netdev_priv(ndev); - ASSERT_RTNL(); - /* * We do not need to touch priv if register_netdevice fails, so just * always use this flow. 
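Once is_child_unique() above takes the lock around the list walk, its early "return false" turns into "record the result and break" so the lock is released on a single exit path. A compact userspace sketch of that lock/break/result shape, with made-up data:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static const int existing_pkeys[] = { 0x8001, 0x8002, 0x8003 };

    static bool pkey_is_unique(int pkey)
    {
        bool result = true;

        pthread_mutex_lock(&list_lock);
        for (size_t i = 0; i < sizeof(existing_pkeys) / sizeof(existing_pkeys[0]); i++) {
            if (existing_pkeys[i] == pkey) {
                result = false;     /* never return with the lock held */
                break;
            }
        }
        pthread_mutex_unlock(&list_lock);

        return result;
    }

    int main(void)
    {
        printf("0x8002 unique: %d\n", pkey_is_unique(0x8002));
        printf("0x9001 unique: %d\n", pkey_is_unique(0x9001));
        return 0;
    }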
@@ -267,6 +268,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) ppriv = ipoib_priv(pdev); rc = -ENODEV; + netdev_lock(ppriv->dev); list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { if (priv->pkey == pkey && priv->child_type == IPOIB_LEGACY_CHILD) { @@ -278,9 +280,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) goto out; } - down_write(&ppriv->vlan_rwsem); list_del_init(&priv->list); - up_write(&ppriv->vlan_rwsem); work->dev = priv->dev; INIT_WORK(&work->work, ipoib_vlan_delete_task); queue_work(ipoib_workqueue, &work->work); @@ -291,6 +291,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) } out: + netdev_unlock(ppriv->dev); rtnl_unlock(); return rc; diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index dc98c39d2b20..cc6a6c1585d2 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -252,7 +252,7 @@ static void __init gicv2m_teardown(void) static struct msi_parent_ops gicv2m_msi_parent_ops = { .supported_flags = GICV2M_MSI_FLAGS_SUPPORTED, .required_flags = GICV2M_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "GICv2m-", diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c index bdb04c808148..c5a7eb1c0419 100644 --- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c +++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c @@ -203,7 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, const struct msi_parent_ops gic_v3_its_msi_parent_ops = { .supported_flags = ITS_MSI_FLAGS_SUPPORTED, .required_flags = ITS_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "ITS-", diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index 34e9ca77a8c3..647b18e24e0c 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c @@ -197,7 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain, static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = { .supported_flags = MBI_MSI_FLAGS_SUPPORTED, .required_flags = MBI_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_NEXUS, .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, .prefix = "MBI-", diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c index d67f93f6d750..60b976286636 100644 --- a/drivers/irqchip/irq-mvebu-gicp.c +++ b/drivers/irqchip/irq-mvebu-gicp.c @@ -161,7 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = { static const struct msi_parent_ops gicp_msi_parent_ops = { .supported_flags = GICP_MSI_FLAGS_SUPPORTED, .required_flags = GICP_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_GENERIC_MSI, .bus_select_mask = MATCH_PLATFORM_MSI, .prefix = "GICP-", diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c index 28f7e81df94f..54f6f0811573 100644 --- a/drivers/irqchip/irq-mvebu-odmi.c +++ b/drivers/irqchip/irq-mvebu-odmi.c @@ 
-157,7 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = { static const struct msi_parent_ops odmi_msi_parent_ops = { .supported_flags = ODMI_MSI_FLAGS_SUPPORTED, .required_flags = ODMI_MSI_FLAGS_REQUIRED, - .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, .bus_select_token = DOMAIN_BUS_GENERIC_MSI, .bus_select_mask = MATCH_PLATFORM_MSI, .prefix = "ODMI-", diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c index bdf5cd2037f2..62f76950a113 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.c +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -208,17 +208,17 @@ skip: } #ifdef CONFIG_SMP -static void __imsic_local_timer_start(struct imsic_local_priv *lpriv) +static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) { lockdep_assert_held(&lpriv->lock); if (!timer_pending(&lpriv->timer)) { lpriv->timer.expires = jiffies + 1; - add_timer_on(&lpriv->timer, smp_processor_id()); + add_timer_on(&lpriv->timer, cpu); } } #else -static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv) +static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) { } #endif @@ -233,7 +233,7 @@ void imsic_local_sync_all(bool force_all) if (force_all) bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); if (!__imsic_local_sync(lpriv)) - __imsic_local_timer_start(lpriv); + __imsic_local_timer_start(lpriv, smp_processor_id()); raw_spin_unlock_irqrestore(&lpriv->lock, flags); } @@ -278,7 +278,7 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu return; } - __imsic_local_timer_start(lpriv); + __imsic_local_timer_start(lpriv, cpu); } } #else diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d05226484c64..98cf4486fcee 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -453,13 +453,14 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs) /** * bond_ipsec_add_sa - program device with a security association + * @bond_dev: pointer to the bond net device * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ -static int bond_ipsec_add_sa(struct xfrm_state *xs, +static int bond_ipsec_add_sa(struct net_device *bond_dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { - struct net_device *bond_dev = xs->xso.dev; struct net_device *real_dev; netdevice_tracker tracker; struct bond_ipsec *ipsec; @@ -495,9 +496,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs, goto out; } - xs->xso.real_dev = real_dev; - err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); + err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack); if (!err) { + xs->xso.real_dev = real_dev; ipsec->xs = xs; INIT_LIST_HEAD(&ipsec->list); mutex_lock(&bond->ipsec_lock); @@ -539,11 +540,25 @@ static void bond_ipsec_add_sa_all(struct bonding *bond) if (ipsec->xs->xso.real_dev == real_dev) continue; - ipsec->xs->xso.real_dev = real_dev; - if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { + if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, + ipsec->xs, NULL)) { slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); - ipsec->xs->xso.real_dev = NULL; + continue; } + + spin_lock_bh(&ipsec->xs->lock); + /* xs might have been killed by the user during the migration + * to the new dev, but bond_ipsec_del_sa() should have done + * nothing, as xso.real_dev is NULL. 
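The bonding ipsec migration above re-checks the state under the object's own lock before committing the move: if the user killed the SA while it was being added to the new device, the add is immediately undone there. A simplified userspace model of that check-under-lock step, with hypothetical names and no real xfrm types:

    #include <pthread.h>
    #include <stdio.h>

    enum state { STATE_VALID, STATE_DEAD };

    struct sa {
        pthread_mutex_t lock;
        enum state state;
        const char *offload_dev;    /* models xso.real_dev */
    };

    static void backend_delete(const char *dev)
    {
        printf("delete SA on %s\n", dev);
    }

    static void migrate_sa(struct sa *sa, const char *new_dev)
    {
        printf("add SA on %s\n", new_dev);   /* driver add happens first */

        pthread_mutex_lock(&sa->lock);
        if (sa->state == STATE_DEAD)
            backend_delete(new_dev);         /* killed mid-migration: undo */
        sa->offload_dev = new_dev;
        pthread_mutex_unlock(&sa->lock);
    }

    static struct sa g_sa = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = STATE_DEAD };

    int main(void)
    {
        migrate_sa(&g_sa, "eth1");
        return 0;
    }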
+ * Delete it from the device we just added it to. The pending + * bond_ipsec_free_sa() call will do the rest of the cleanup. + */ + if (ipsec->xs->km.state == XFRM_STATE_DEAD && + real_dev->xfrmdev_ops->xdo_dev_state_delete) + real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, + ipsec->xs); + ipsec->xs->xso.real_dev = real_dev; + spin_unlock_bh(&ipsec->xs->lock); } out: mutex_unlock(&bond->ipsec_lock); @@ -551,54 +566,27 @@ out: /** * bond_ipsec_del_sa - clear out this specific SA + * @bond_dev: pointer to the bond net device * @xs: pointer to transformer state struct **/ -static void bond_ipsec_del_sa(struct xfrm_state *xs) +static void bond_ipsec_del_sa(struct net_device *bond_dev, + struct xfrm_state *xs) { - struct net_device *bond_dev = xs->xso.dev; struct net_device *real_dev; - netdevice_tracker tracker; - struct bond_ipsec *ipsec; - struct bonding *bond; - struct slave *slave; - if (!bond_dev) + if (!bond_dev || !xs->xso.real_dev) return; - rcu_read_lock(); - bond = netdev_priv(bond_dev); - slave = rcu_dereference(bond->curr_active_slave); - real_dev = slave ? slave->dev : NULL; - netdev_hold(real_dev, &tracker, GFP_ATOMIC); - rcu_read_unlock(); - - if (!slave) - goto out; - - if (!xs->xso.real_dev) - goto out; - - WARN_ON(xs->xso.real_dev != real_dev); + real_dev = xs->xso.real_dev; if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_delete || netif_is_bond_master(real_dev)) { slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); - goto out; + return; } - real_dev->xfrmdev_ops->xdo_dev_state_delete(xs); -out: - netdev_put(real_dev, &tracker); - mutex_lock(&bond->ipsec_lock); - list_for_each_entry(ipsec, &bond->ipsec_list, list) { - if (ipsec->xs == xs) { - list_del(&ipsec->list); - kfree(ipsec); - break; - } - } - mutex_unlock(&bond->ipsec_lock); + real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs); } static void bond_ipsec_del_sa_all(struct bonding *bond) @@ -624,46 +612,55 @@ static void bond_ipsec_del_sa_all(struct bonding *bond) slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); - } else { - real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); - if (real_dev->xfrmdev_ops->xdo_dev_state_free) - real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs); + continue; } + + spin_lock_bh(&ipsec->xs->lock); + ipsec->xs->xso.real_dev = NULL; + /* Don't double delete states killed by the user. */ + if (ipsec->xs->km.state != XFRM_STATE_DEAD) + real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, + ipsec->xs); + spin_unlock_bh(&ipsec->xs->lock); + + if (real_dev->xfrmdev_ops->xdo_dev_state_free) + real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, + ipsec->xs); } mutex_unlock(&bond->ipsec_lock); } -static void bond_ipsec_free_sa(struct xfrm_state *xs) +static void bond_ipsec_free_sa(struct net_device *bond_dev, + struct xfrm_state *xs) { - struct net_device *bond_dev = xs->xso.dev; struct net_device *real_dev; - netdevice_tracker tracker; + struct bond_ipsec *ipsec; struct bonding *bond; - struct slave *slave; if (!bond_dev) return; - rcu_read_lock(); bond = netdev_priv(bond_dev); - slave = rcu_dereference(bond->curr_active_slave); - real_dev = slave ? 
slave->dev : NULL; - netdev_hold(real_dev, &tracker, GFP_ATOMIC); - rcu_read_unlock(); - - if (!slave) - goto out; + mutex_lock(&bond->ipsec_lock); if (!xs->xso.real_dev) goto out; - WARN_ON(xs->xso.real_dev != real_dev); + real_dev = xs->xso.real_dev; - if (real_dev && real_dev->xfrmdev_ops && + xs->xso.real_dev = NULL; + if (real_dev->xfrmdev_ops && real_dev->xfrmdev_ops->xdo_dev_state_free) - real_dev->xfrmdev_ops->xdo_dev_state_free(xs); + real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs); out: - netdev_put(real_dev, &tracker); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); + kfree(ipsec); + break; + } + } + mutex_unlock(&bond->ipsec_lock); } /** diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index f65c1a1e05cc..bf6398772960 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -275,7 +275,7 @@ static int ctucan_set_bittiming(struct net_device *ndev) static int ctucan_set_data_bittiming(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); - struct can_bittiming *dbt = &priv->can.data_bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; /* Note that dbt may be modified here */ return ctucan_set_btr(ndev, dbt, false); @@ -290,7 +290,7 @@ static int ctucan_set_data_bittiming(struct net_device *ndev) static int ctucan_set_secondary_sample_point(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); - struct can_bittiming *dbt = &priv->can.data_bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; int ssp_offset = 0; u32 ssp_cfg = 0; /* No SSP by default */ @@ -1358,12 +1358,12 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne priv->ntxbufs = ntxbufs; priv->dev = dev; priv->can.bittiming_const = &ctu_can_fd_bit_timing_max; - priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max; + priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max; priv->can.do_set_mode = ctucan_do_set_mode; /* Needed for timing adjustment to be performed as soon as possible */ priv->can.do_set_bittiming = ctucan_set_bittiming; - priv->can.do_set_data_bittiming = ctucan_set_data_bittiming; + priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming; priv->can.do_get_berr_counter = ctucan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 5ec3170b896a..ea8c807af4d8 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -404,8 +404,8 @@ int open_candev(struct net_device *dev) /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ if ((priv->ctrlmode & CAN_CTRLMODE_FD) && - (!priv->data_bittiming.bitrate || - priv->data_bittiming.bitrate < priv->bittiming.bitrate)) { + (!priv->fd.data_bittiming.bitrate || + priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) { netdev_err(dev, "incorrect/missing data bit-timing\n"); return -EINVAL; } @@ -543,16 +543,16 @@ int register_candev(struct net_device *dev) if (!priv->bitrate_const != !priv->bitrate_const_cnt) return -EINVAL; - if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt) + if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt) return -EINVAL; /* We only support either fixed bit rates or bit timing const. 
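The register_candev() checks kept above use the "!ptr != !count" idiom: a bitrate table pointer and its element count must be either both set or both zero. A tiny illustration of that consistency test with invented values:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool table_is_consistent(const unsigned int *table, size_t count)
    {
        return !table == !count;    /* both present or both absent */
    }

    int main(void)
    {
        static const unsigned int rates[] = { 125000, 250000, 500000 };

        printf("%d %d %d\n",
               table_is_consistent(rates, 3),   /* 1: both set */
               table_is_consistent(NULL, 0),    /* 1: both absent */
               table_is_consistent(rates, 0));  /* 0: mismatch */
        return 0;
    }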
*/ - if ((priv->bitrate_const || priv->data_bitrate_const) && - (priv->bittiming_const || priv->data_bittiming_const)) + if ((priv->bitrate_const || priv->fd.data_bitrate_const) && + (priv->bittiming_const || priv->fd.data_bittiming_const)) return -EINVAL; if (!can_bittiming_const_valid(priv->bittiming_const) || - !can_bittiming_const_valid(priv->data_bittiming_const)) + !can_bittiming_const_valid(priv->fd.data_bittiming_const)) return -EINVAL; if (!priv->termination_const) { diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index f1db9b7ffd4d..a36842ace084 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -141,7 +141,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla, { struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; struct can_tdc tdc = { 0 }; - const struct can_tdc_const *tdc_const = priv->tdc_const; + const struct can_tdc_const *tdc_const = priv->fd.tdc_const; int err; if (!tdc_const || !can_tdc_is_enabled(priv)) @@ -179,7 +179,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla, tdc.tdcf = tdcf; } - priv->tdc = tdc; + priv->fd.tdc = tdc; return 0; } @@ -228,10 +228,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], dev->mtu = CANFD_MTU; } else { dev->mtu = CAN_MTU; - memset(&priv->data_bittiming, 0, - sizeof(priv->data_bittiming)); + memset(&priv->fd.data_bittiming, 0, + sizeof(priv->fd.data_bittiming)); priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; - memset(&priv->tdc, 0, sizeof(priv->tdc)); + memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc)); } tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK; @@ -312,16 +312,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], * directly via do_set_bitrate(). Bail out if neither * is given. */ - if (!priv->data_bittiming_const && !priv->do_set_data_bittiming && - !priv->data_bitrate_const) + if (!priv->fd.data_bittiming_const && !priv->fd.do_set_data_bittiming && + !priv->fd.data_bitrate_const) return -EOPNOTSUPP; memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); err = can_get_bittiming(dev, &dbt, - priv->data_bittiming_const, - priv->data_bitrate_const, - priv->data_bitrate_const_cnt, + priv->fd.data_bittiming_const, + priv->fd.data_bitrate_const, + priv->fd.data_bitrate_const_cnt, extack); if (err) return err; @@ -333,7 +333,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], return -EINVAL; } - memset(&priv->tdc, 0, sizeof(priv->tdc)); + memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc)); if (data[IFLA_CAN_TDC]) { /* TDC parameters are provided: use them */ err = can_tdc_changelink(priv, data[IFLA_CAN_TDC], @@ -346,17 +346,17 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], /* Neither of TDC parameters nor TDC flags are * provided: do calculation */ - can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, + can_calc_tdco(&priv->fd.tdc, priv->fd.tdc_const, &dbt, &priv->ctrlmode, priv->ctrlmode_supported); } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly * turned off. 
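The CAN core changes in this series gather the FD-specific knobs behind an fd sub-struct, so accesses read priv->fd.data_bittiming and so on. A condensed, hypothetical view of that grouping, combined with the open_candev() rule that the data-phase bitrate must be at least the arbitration bitrate; field names are abridged and not the full can_priv layout:

    #include <stdio.h>

    struct can_bittiming { unsigned int bitrate; };

    /* FD-only knobs grouped together, mirroring the new priv->fd.* access */
    struct can_fd_cfg {
        struct can_bittiming data_bittiming;
        unsigned int tdco;
    };

    struct can_priv_sketch {
        struct can_bittiming bittiming;    /* nominal/arbitration phase */
        struct can_fd_cfg fd;              /* data phase lives under ->fd */
    };

    int main(void)
    {
        struct can_priv_sketch priv = {
            .bittiming.bitrate = 500000,
            .fd.data_bittiming.bitrate = 2000000,
        };

        /* FD requires data bitrate >= arbitration bitrate */
        printf("fd config ok: %d\n",
               priv.fd.data_bittiming.bitrate >= priv.bittiming.bitrate);
        return 0;
    }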
TDC is disabled: do nothing */ - memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); + memcpy(&priv->fd.data_bittiming, &dbt, sizeof(dbt)); - if (priv->do_set_data_bittiming) { + if (priv->fd.do_set_data_bittiming) { /* Finally, set the bit-timing registers */ - err = priv->do_set_data_bittiming(dev); + err = priv->fd.do_set_data_bittiming(dev); if (err) return err; } @@ -394,7 +394,7 @@ static size_t can_tdc_get_size(const struct net_device *dev) struct can_priv *priv = netdev_priv(dev); size_t size; - if (!priv->tdc_const) + if (!priv->fd.tdc_const) return 0; size = nla_total_size(0); /* nest IFLA_CAN_TDC */ @@ -404,17 +404,17 @@ static size_t can_tdc_get_size(const struct net_device *dev) } size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */ - if (priv->tdc_const->tdcf_max) { + if (priv->fd.tdc_const->tdcf_max) { size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */ } if (can_tdc_is_enabled(priv)) { if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL || - priv->do_get_auto_tdcv) + priv->fd.do_get_auto_tdcv) size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */ - if (priv->tdc_const->tdcf_max) + if (priv->fd.tdc_const->tdcf_max) size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */ } @@ -442,9 +442,9 @@ static size_t can_get_size(const struct net_device *dev) size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ size += nla_total_size(sizeof(struct can_berr_counter)); - if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ + if (priv->fd.data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ size += nla_total_size(sizeof(struct can_bittiming)); - if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ + if (priv->fd.data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); if (priv->termination_const) { size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ @@ -454,9 +454,9 @@ static size_t can_get_size(const struct net_device *dev) if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ size += nla_total_size(sizeof(*priv->bitrate_const) * priv->bitrate_const_cnt); - if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ - size += nla_total_size(sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt); + if (priv->fd.data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ + size += nla_total_size(sizeof(*priv->fd.data_bitrate_const) * + priv->fd.data_bitrate_const_cnt); size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */ size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */ @@ -468,8 +468,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct nlattr *nest; struct can_priv *priv = netdev_priv(dev); - struct can_tdc *tdc = &priv->tdc; - const struct can_tdc_const *tdc_const = priv->tdc_const; + struct can_tdc *tdc = &priv->fd.tdc; + const struct can_tdc_const *tdc_const = priv->fd.tdc_const; if (!tdc_const) return 0; @@ -497,8 +497,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev) if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) { tdcv = tdc->tdcv; err = 0; - } else if (priv->do_get_auto_tdcv) { - err = priv->do_get_auto_tdcv(dev, &tdcv); + } else if 
(priv->fd.do_get_auto_tdcv) { + err = priv->fd.do_get_auto_tdcv(dev, &tdcv); } if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv)) goto err_cancel; @@ -564,14 +564,14 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) !priv->do_get_berr_counter(dev, &bec) && nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || - (priv->data_bittiming.bitrate && + (priv->fd.data_bittiming.bitrate && nla_put(skb, IFLA_CAN_DATA_BITTIMING, - sizeof(priv->data_bittiming), &priv->data_bittiming)) || + sizeof(priv->fd.data_bittiming), &priv->fd.data_bittiming)) || - (priv->data_bittiming_const && + (priv->fd.data_bittiming_const && nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, - sizeof(*priv->data_bittiming_const), - priv->data_bittiming_const)) || + sizeof(*priv->fd.data_bittiming_const), + priv->fd.data_bittiming_const)) || (priv->termination_const && (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || @@ -586,11 +586,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) priv->bitrate_const_cnt, priv->bitrate_const)) || - (priv->data_bitrate_const && + (priv->fd.data_bitrate_const && nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, - sizeof(*priv->data_bitrate_const) * - priv->data_bitrate_const_cnt, - priv->data_bitrate_const)) || + sizeof(*priv->fd.data_bitrate_const) * + priv->fd.data_bitrate_const_cnt, + priv->fd.data_bitrate_const)) || (nla_put(skb, IFLA_CAN_BITRATE_MAX, sizeof(priv->bitrate_max), diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 6d80c341b26f..06d5d35fc1b5 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -1226,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; - struct can_bittiming *dbt = &priv->can.data_bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; struct flexcan_regs __iomem *regs = priv->regs; u32 reg_cbt, reg_fdctrl; @@ -2239,7 +2239,7 @@ static int flexcan_probe(struct platform_device *pdev) priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; priv->can.bittiming_const = &flexcan_fd_bittiming_const; - priv->can.data_bittiming_const = + priv->can.fd.data_bittiming_const = &flexcan_fd_data_bittiming_const; } else { priv->can.bittiming_const = &flexcan_bittiming_const; diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index c86b57d47085..2eeee65f606f 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -669,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u16 brp, sjw, tseg1, tseg2, tdc; /* Configure bit timing */ @@ -1000,10 +1000,10 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK); - priv->can.bittiming_const = &ifi_canfd_bittiming_const; - priv->can.data_bittiming_const = &ifi_canfd_bittiming_const; - priv->can.do_set_mode = ifi_canfd_set_mode; - priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; + priv->can.bittiming_const = &ifi_canfd_bittiming_const; + priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const; + 
priv->can.do_set_mode = ifi_canfd_set_mode; + priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; /* IFI CANFD can do both Bosch FD and ISO FD */ priv->can.ctrlmode = CAN_CTRLMODE_FD; diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index cf0d51805272..7d3066691d5d 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/timer.h> +#include <net/netdev_queues.h> MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); @@ -410,10 +411,13 @@ struct kvaser_pciefd_can { void __iomem *reg_base; struct can_berr_counter bec; u8 cmd_seq; + u8 tx_max_count; + u8 tx_idx; + u8 ack_idx; int err_rep_cnt; - int echo_idx; + unsigned int completed_tx_pkts; + unsigned int completed_tx_bytes; spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ - spinlock_t echo_lock; /* Locks the message echo buffer */ struct timer_list bec_poll_timer; struct completion start_comp, flush_comp; }; @@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev) int ret; struct kvaser_pciefd_can *can = netdev_priv(netdev); + can->tx_idx = 0; + can->ack_idx = 0; + ret = open_candev(netdev); if (ret) return ret; @@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev) timer_delete(&can->bec_poll_timer); } can->can.state = CAN_STATE_STOPPED; + netdev_reset_queue(netdev); close_candev(netdev); return ret; } +static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) +{ + return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); +} + static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, - struct kvaser_pciefd_can *can, + struct can_priv *can, u8 seq, struct sk_buff *skb) { struct canfd_frame *cf = (struct canfd_frame *)skb->data; int packet_size; - int seq = can->echo_idx; memset(p, 0, sizeof(*p)); - if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; if (cf->can_id & CAN_RTR_FLAG) @@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, } else { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, - can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); + can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); } p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); @@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); - unsigned long irq_flags; struct kvaser_pciefd_tx_packet packet; + unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); + unsigned int frame_len; int nr_words; - u8 count; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; + if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) + return NETDEV_TX_BUSY; - nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb); - spin_lock_irqsave(&can->echo_lock, irq_flags); /* Prepare and save echo skb in internal slot */ - can_put_echo_skb(skb, netdev, can->echo_idx, 0); - - /* Move echo index to the next slot */ - can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + WRITE_ONCE(can->can.echo_skb[seq], NULL); + frame_len = can_skb_get_frame_len(skb); + can_put_echo_skb(skb, netdev, seq, frame_len); + 
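The kvaser_pciefd TX rework above replaces the echo spinlock with two monotonically increasing indices: tx_idx for frames queued and ack_idx for frames completed, with free space computed as capacity minus their difference and the echo slot taken from the index masked by a power-of-two ring size. A userspace sketch of that accounting under unsigned wraparound, with invented sizes:

    #include <stdio.h>

    #define RING_SIZE 8u               /* power of two, like echo_skb_max */

    struct txq {
        unsigned int tx_idx;           /* frames handed to hardware */
        unsigned int ack_idx;          /* frames acked by hardware */
        unsigned int max_count;        /* hardware FIFO depth */
    };

    static unsigned int tx_avail(const struct txq *q)
    {
        return q->max_count - (q->tx_idx - q->ack_idx);
    }

    int main(void)
    {
        struct txq q = { .tx_idx = 0, .ack_idx = 0, .max_count = 6 };

        for (int i = 0; i < 5; i++) {
            unsigned int slot = q.tx_idx & (RING_SIZE - 1);

            printf("xmit into slot %u, avail before=%u\n", slot, tx_avail(&q));
            q.tx_idx++;
        }
        q.ack_idx += 3;                /* three completions arrive */
        printf("after acks: avail=%u\n", tx_avail(&q));
        return 0;
    }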
netdev_sent_queue(netdev, frame_len); + WRITE_ONCE(can->tx_idx, can->tx_idx + 1); /* Write header to fifo */ iowrite32(packet.header[0], @@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, KVASER_PCIEFD_KCAN_FIFO_LAST_REG); } - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); - /* No room for a new message, stop the queue until at least one - * successful transmit - */ - if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) - netif_stop_queue(netdev); - spin_unlock_irqrestore(&can->echo_lock, irq_flags); + netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1); return NETDEV_TX_OK; } @@ -856,7 +863,7 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) struct can_bittiming *bt; if (data) - bt = &can->can.data_bittiming; + bt = &can->can.fd.data_bittiming; else bt = &can->can.bittiming; @@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; can->bec.txerr = 0; can->bec.rxerr = 0; @@ -983,17 +992,16 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) tx_nr_packets_max = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); can->can.clock.freq = pcie->freq; - can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); - can->echo_idx = 0; - spin_lock_init(&can->echo_lock); + can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count); spin_lock_init(&can->lock); can->can.bittiming_const = &kvaser_pciefd_bittiming_const; - can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; + can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const; can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; - can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; + can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; can->can.do_set_mode = kvaser_pciefd_set_mode; can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | @@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_canfd_skb(priv->dev, &cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } cf->len = can_fd_dlc2len(dlc); @@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); } @@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, priv->dev->stats.rx_packets++; kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - return netif_rx(skb); + netif_rx(skb); + + return 0; } static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, @@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, netdev_dbg(can->can.dev, "Packet was flushed\n"); } else { int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); - int len; - u8 count; + unsigned int len, frame_len = 0; struct sk_buff *skb; + if (echo_idx != 
(can->ack_idx & (can->can.echo_skb_max - 1))) + return 0; skb = can->can.echo_skb[echo_idx]; - if (skb) - kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - len = can_get_echo_skb(can->can.dev, echo_idx, NULL); - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + if (!skb) + return 0; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len); - if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev)) - netif_wake_queue(can->can.dev); + /* Pairs with barrier in kvaser_pciefd_start_xmit() */ + smp_store_release(&can->ack_idx, can->ack_idx + 1); + can->completed_tx_pkts++; + can->completed_tx_bytes += frame_len; if (!one_shot_fail) { can->can.dev->stats.tx_bytes += len; @@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) { int pos = 0; int res = 0; + unsigned int i; do { res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); + /* Report ACKs in this buffer to BQL en masse for correct periods */ + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (!can->completed_tx_pkts) + continue; + netif_subqueue_completed_wake(can->can.dev, 0, + can->completed_tx_pkts, + can->completed_tx_bytes, + kvaser_pciefd_tx_avail(can), 1); + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + } + return res; } -static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { + void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG; u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) + iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */ + } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */ + } if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); - - iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); - u32 srb_irq = 0; - u32 srb_release = 0; int i; if (!(pci_irq & irq_mask->all)) return IRQ_NONE; + iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + if (pci_irq & irq_mask->kcan_rx0) - srb_irq = kvaser_pciefd_receive_irq(pcie); + kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0; - - if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) - srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1; - - if (srb_release) - 
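On the completion side the patch batches the byte-queue-limits accounting: each ACK packet only bumps the per-channel completed_tx_pkts/completed_tx_bytes counters, and kvaser_pciefd_read_buffer() reports the whole batch to netif_subqueue_completed_wake() once per DMA buffer. A hedged sketch of that stop/wake contract, using the <net/netdev_queues.h> helpers with the same argument shapes as above but around a hypothetical "struct foo_can" (skb and echo handling left out):

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct foo_can {
	struct net_device *ndev;
	unsigned int tx_max_count, tx_idx, ack_idx;
	unsigned int completed_tx_pkts, completed_tx_bytes;
};

static unsigned int foo_tx_avail(const struct foo_can *can)
{
	return can->tx_max_count -
	       (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
}

/* xmit path: account the frame, then stop the queue if the FIFO is full */
static void foo_xmit_accounting(struct foo_can *can, unsigned int frame_len)
{
	netdev_sent_queue(can->ndev, frame_len);
	WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
	netif_subqueue_maybe_stop(can->ndev, 0, foo_tx_avail(can), 1, 1);
}

/* IRQ path: one ACK completes one frame... */
static void foo_ack_one(struct foo_can *can, unsigned int frame_len)
{
	smp_store_release(&can->ack_idx, can->ack_idx + 1);
	can->completed_tx_pkts++;
	can->completed_tx_bytes += frame_len;
}

/* ...and BQL sees the whole batch once per processed DMA buffer */
static void foo_flush_completions(struct foo_can *can)
{
	if (!can->completed_tx_pkts)
		return;

	netif_subqueue_completed_wake(can->ndev, 0,
				      can->completed_tx_pkts,
				      can->completed_tx_bytes,
				      foo_tx_avail(can), 1);
	can->completed_tx_pkts = 0;
	can->completed_tx_bytes = 0;
}

Reporting completions per buffer rather than per frame gives BQL coherent completion intervals and avoids waking the queue for every single CAN frame.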
iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); return IRQ_HANDLED; } @@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) } } +static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie) +{ + unsigned int i; + + /* Masking PCI_IRQ is insufficient as running ISR will unmask it */ + iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); + for (i = 0; i < pcie->nr_channels; ++i) + iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); +} + static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; - void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) @@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ - irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); - iowrite32(irq_mask->all, irq_en_base); + iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); @@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, return 0; err_free_irq: - /* Disable PCI interrupts */ - iowrite32(0, irq_en_base); + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); err_pci_free_irq_vectors: @@ -1844,35 +1875,26 @@ err_disable_pci: return ret; } -static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) -{ - int i; - - for (i = 0; i < pcie->nr_channels; i++) { - struct kvaser_pciefd_can *can = pcie->can[i]; - - if (can) { - iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); - unregister_candev(can->can.dev); - timer_delete(&can->bec_poll_timer); - kvaser_pciefd_pwm_stop(can); - free_candev(can->can.dev); - } - } -} - static void kvaser_pciefd_remove(struct pci_dev *pdev) { struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); + unsigned int i; - kvaser_pciefd_remove_all_ctrls(pcie); + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; - /* Disable interrupts */ - iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); - iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); + unregister_candev(can->can.dev); + timer_delete(&can->bec_poll_timer); + kvaser_pciefd_pwm_stop(can); + } + kvaser_pciefd_disable_irq_srcs(pcie); free_irq(pcie->pci->irq, pcie); pci_free_irq_vectors(pcie->pci); + + for (i = 0; i < pcie->nr_channels; ++i) + free_candev(pcie->can[i]->can.dev); + pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index c2c116ce1087..6c656bfdb323 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1372,7 +1372,7 @@ static int m_can_set_bittiming(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); const struct can_bittiming *bt = &cdev->can.bittiming; - const struct can_bittiming *dbt = &cdev->can.data_bittiming; + const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -1738,7 +1738,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) if (err) return err; cdev->can.bittiming_const = &m_can_bittiming_const_30X; - 
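The data_bittiming/tdc churn in this and the following CAN drivers is mechanical: the CAN FD specific members of struct can_priv have evidently been grouped into a can_priv::fd sub-structure, so every data-phase field and callback gains an fd. prefix while the nominal-phase fields keep their old names. A small illustrative snippet of the new access pattern (the surrounding struct can_priv layout is assumed, it is not shown in these hunks):

#include <linux/can/dev.h>
#include <linux/printk.h>

static void foo_dump_fd_timing(const struct can_priv *priv)
{
	/* before: priv->data_bittiming, priv->tdc.tdco,
	 *	   priv->do_set_data_bittiming, priv->do_get_auto_tdcv
	 */
	const struct can_bittiming *dbt = &priv->fd.data_bittiming;

	pr_info("data-phase brp=%u tdco=%u\n", dbt->brp, priv->fd.tdc.tdco);
}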
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ @@ -1746,13 +1746,13 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) if (err) return err; cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X; niso = m_can_niso_supported(cdev); if (niso < 0) diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 28f3fd805273..77292afaed22 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -624,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); - return pucan_set_timing_fast(priv, &priv->can.data_bittiming); + return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming); } static int peak_canfd_close(struct net_device *ndev) @@ -813,12 +813,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, /* complete now socket-can initialization side */ priv->can.state = CAN_STATE_STOPPED; priv->can.bittiming_const = &peak_canfd_nominal_const; - priv->can.data_bittiming_const = &peak_canfd_data_const; + priv->can.fd.data_bittiming_const = &peak_canfd_data_const; priv->can.do_set_mode = peak_canfd_set_mode; priv->can.do_get_berr_counter = peak_canfd_get_berr_counter; priv->can.do_set_bittiming = peak_canfd_set_bittiming; - priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming; + priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index aa3df0d05b85..7f10213738e5 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -21,6 +21,7 @@ * wherever it is modified to a readable name. */ +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/can/dev.h> @@ -74,33 +75,24 @@ #define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3)) /* RSCFDnCFDGERFL / RSCFDnGERFL */ -#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16) -#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch)) +#define RCANFD_GERFL_EEF GENMASK(23, 16) #define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ #define RCANFD_GERFL_THLES BIT(2) #define RCANFD_GERFL_MES BIT(1) #define RCANFD_GERFL_DEF BIT(0) #define RCANFD_GERFL_ERR(gpriv, x) \ - ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \ - RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \ - RCANFD_GERFL_MES | \ - ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))) +({\ + typeof(gpriv) (_gpriv) = (gpriv); \ + ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \ + RCANFD_GERFL_MES | ((_gpriv)->fdmode ? 
RCANFD_GERFL_CMPOF : 0))); \ +}) /* AFL Rx rules registers */ -/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ -#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \ - (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \ - (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) - -#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \ - (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \ - reg_gen4(gpriv, 0x1ff, 0xff)) - /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR_AFLDAE BIT(8) -#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f)) +#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn) /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ #define RCANFD_GAFLID_GAFLLB BIT(29) @@ -118,13 +110,13 @@ /* RSCFDnCFDCmNCFG - CAN FD only */ #define RCANFD_NCFG_NTSEG2(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24)) + (((x) & ((gpriv)->info->nom_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->ntseg2) #define RCANFD_NCFG_NTSEG1(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16)) + (((x) & ((gpriv)->info->nom_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->ntseg1) #define RCANFD_NCFG_NSJW(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11)) + (((x) & ((gpriv)->info->nom_bittiming->sjw_max - 1)) << (gpriv)->info->sh->nsjw) #define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0) @@ -186,13 +178,13 @@ #define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */ /* RSCFDnCFDCmDCFG */ -#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24) +#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & ((gpriv)->info->data_bittiming->sjw_max - 1)) << 24) #define RCANFD_DCFG_DTSEG2(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20)) + (((x) & ((gpriv)->info->data_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->dtseg2) #define RCANFD_DCFG_DTSEG1(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16)) + (((x) & ((gpriv)->info->data_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->dtseg1) #define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0) @@ -233,11 +225,14 @@ /* Common FIFO bits */ /* RSCFDnCFDCFCCk */ -#define RCANFD_CFCC_CFTML(gpriv, x) \ - (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20)) -#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16)) +#define RCANFD_CFCC_CFTML(gpriv, cftml) \ +({\ + typeof(gpriv) (_gpriv) = (gpriv); \ + (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \ +}) +#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm) #define RCANFD_CFCC_CFIM BIT(12) -#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8)) +#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc) #define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4) #define RCANFD_CFCC_CFTXIE BIT(2) #define RCANFD_CFCC_CFE BIT(0) @@ -298,14 +293,14 @@ /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR (0x0098) /* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */ -#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2))) +#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w))) /* RSCFDnCFDRMNB / RSCFDnRMNB */ #define RCANFD_RMNB (0x00a4) /* RSCFDnCFDRMND / RSCFDnRMND */ #define RCANFD_RMND(y) (0x00a8 + (0x04 * (y))) /* RSCFDnCFDRFCCx / RSCFDnRFCCx */ -#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x))) +#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x))) /* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */ #define 
RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20) /* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */ @@ -315,13 +310,13 @@ /* RSCFDnCFDCFCCx / RSCFDnCFCCx */ #define RCANFD_CFCC(gpriv, ch, idx) \ - (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx))) + ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */ #define RCANFD_CFSTS(gpriv, ch, idx) \ - (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx))) + ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */ #define RCANFD_CFPCTR(gpriv, ch, idx) \ - (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx))) + ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx))) /* RSCFDnCFDFESTS / RSCFDnFESTS */ #define RCANFD_FESTS (0x0238) @@ -437,7 +432,7 @@ /* CAN FD mode specific register map */ /* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ -#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m))) +#define RCANFD_F_DCFG(gpriv, m) ((gpriv)->info->regs->f_dcfg + (0x20 * (m))) #define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m))) #define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m))) #define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m))) @@ -453,7 +448,7 @@ #define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q))) /* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */ -#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000) +#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset) #define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x))) #define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x))) #define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x))) @@ -461,7 +456,7 @@ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df))) /* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */ -#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400) +#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset) #define RCANFD_F_CFID(gpriv, ch, idx) \ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx))) @@ -510,12 +505,43 @@ struct rcar_canfd_global; +struct rcar_canfd_regs { + u16 rfcc; /* RX FIFO Configuration/Control Register */ + u16 cfcc; /* Common FIFO Configuration/Control Register */ + u16 cfsts; /* Common FIFO Status Register */ + u16 cfpctr; /* Common FIFO Pointer Control Register */ + u16 f_dcfg; /* Global FD Configuration Register */ + u16 rfoffset; /* Receive FIFO buffer access ID register */ + u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */ +}; + +struct rcar_canfd_shift_data { + u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */ + u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */ + u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */ + u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */ + u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */ + u8 cftml; /* Common FIFO TX Message Buffer Link */ + u8 cfm; /* Common FIFO Mode */ + u8 cfdc; /* Common FIFO Depth Configuration */ +}; + struct rcar_canfd_hw_info { + const struct can_bittiming_const *nom_bittiming; + const struct can_bittiming_const *data_bittiming; + const struct rcar_canfd_regs *regs; + const struct rcar_canfd_shift_data *sh; + u8 rnc_field_width; + u8 max_aflpn; + u8 max_cftml; u8 max_channels; u8 postdiv; /* hardware features */ unsigned shared_global_irqs:1; /* Has shared global irqs */ unsigned multi_channel_irqs:1; /* Has multiple channel irqs */ + unsigned ch_interface_mode:1; /* Has channel interface mode */ + 
unsigned shared_can_regs:1; /* Has shared classical can registers */ + unsigned external_clk:1; /* Has external clock */ }; /* Channel priv data */ @@ -548,7 +574,7 @@ struct rcar_canfd_global { }; /* CAN FD mode nominal rate constants */ -static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { +static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 128, @@ -560,8 +586,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + /* CAN FD mode data rate constants */ -static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { +static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 16, @@ -573,6 +611,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 2, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /* Classical CAN mode bitrate constants */ static const struct can_bittiming_const rcar_canfd_bittiming_const = { .name = RCANFD_DRV_NAME, @@ -586,36 +636,113 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = { .brp_inc = 1, }; +static const struct rcar_canfd_regs rcar_gen3_regs = { + .rfcc = 0x00b8, + .cfcc = 0x0118, + .cfsts = 0x0178, + .cfpctr = 0x01d8, + .f_dcfg = 0x0500, + .rfoffset = 0x3000, + .cfoffset = 0x3400, +}; + +static const struct rcar_canfd_regs rcar_gen4_regs = { + .rfcc = 0x00c0, + .cfcc = 0x0120, + .cfsts = 0x01e0, + .cfpctr = 0x0240, + .f_dcfg = 0x1400, + .rfoffset = 0x6000, + .cfoffset = 0x6400, +}; + +static const struct rcar_canfd_shift_data rcar_gen3_shift_data = { + .ntseg2 = 24, + .ntseg1 = 16, + .nsjw = 11, + .dtseg2 = 20, + .dtseg1 = 16, + .cftml = 20, + .cfm = 16, + .cfdc = 8, +}; + +static const struct rcar_canfd_shift_data rcar_gen4_shift_data = { + .ntseg2 = 25, + .ntseg1 = 17, + .nsjw = 10, + .dtseg2 = 16, + .dtseg1 = 8, + .cftml = 16, + .cfm = 8, + .cfdc = 21, +}; + static const struct rcar_canfd_hw_info rcar_gen3_hw_info = { + .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen3_data_bittiming_const, + .regs = &rcar_gen3_regs, + .sh = &rcar_gen3_shift_data, + .rnc_field_width = 8, + .max_aflpn = 31, + .max_cftml = 15, .max_channels = 2, .postdiv = 2, .shared_global_irqs = 1, + .ch_interface_mode = 0, + .shared_can_regs = 0, + .external_clk = 1, }; static const struct rcar_canfd_hw_info rcar_gen4_hw_info = { + .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen4_data_bittiming_const, + .regs = &rcar_gen4_regs, + .sh = &rcar_gen4_shift_data, + .rnc_field_width = 16, + .max_aflpn = 127, + .max_cftml = 31, .max_channels = 8, .postdiv = 2, .shared_global_irqs = 1, + .ch_interface_mode = 1, + .shared_can_regs = 1, + .external_clk = 1, }; static const struct rcar_canfd_hw_info rzg2l_hw_info = { + .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen3_data_bittiming_const, + .regs = &rcar_gen3_regs, + .sh 
= &rcar_gen3_shift_data, + .rnc_field_width = 8, + .max_aflpn = 31, + .max_cftml = 15, .max_channels = 2, .postdiv = 1, .multi_channel_irqs = 1, + .ch_interface_mode = 0, + .shared_can_regs = 0, + .external_clk = 1, }; -/* Helper functions */ -static inline bool is_gen4(struct rcar_canfd_global *gpriv) -{ - return gpriv->info == &rcar_gen4_hw_info; -} - -static inline u32 reg_gen4(struct rcar_canfd_global *gpriv, - u32 gen4, u32 not_gen4) -{ - return is_gen4(gpriv) ? gen4 : not_gen4; -} +static const struct rcar_canfd_hw_info r9a09g047_hw_info = { + .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const, + .data_bittiming = &rcar_canfd_gen4_data_bittiming_const, + .regs = &rcar_gen4_regs, + .sh = &rcar_gen4_shift_data, + .rnc_field_width = 16, + .max_aflpn = 63, + .max_cftml = 31, + .max_channels = 6, + .postdiv = 1, + .multi_channel_irqs = 1, + .ch_interface_mode = 1, + .shared_can_regs = 1, + .external_clk = 0, +}; +/* Helper functions */ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) { u32 data = readl(reg); @@ -681,9 +808,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) can_free_echo_skb(ndev, i, NULL); } +static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch, + unsigned int num_rules) +{ + unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width; + unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width; + unsigned int w = ch / rnc_stride; + u32 rnc = num_rules << shift; + + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc); +} + static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv) { - if (is_gen4(gpriv)) { + if (gpriv->info->ch_interface_mode) { u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE : RCANFD_GEN4_FDCFG_CLOE; @@ -789,7 +927,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, u32 ch, u32 rule_entry) { - int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; + unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; u32 rule_entry_index = rule_entry % 16; u32 ridx = ch + RCANFD_RFFIFO_IDX; @@ -800,9 +938,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, RCANFD_GAFLECTR_AFLDAE)); /* Write number of rules for channel */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch), - RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules)); - if (is_gen4(gpriv)) + rcar_canfd_setrnc(gpriv, ch, num_rules); + if (gpriv->info->shared_can_regs) offset = RCANFD_GEN4_GAFL_OFFSET; else if (gpriv->fdmode) offset = RCANFD_F_GAFL_OFFSET; @@ -942,7 +1079,7 @@ static void rcar_canfd_global_error(struct net_device *ndev) u32 ridx = ch + RCANFD_RFFIFO_IDX; gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); - if (gerfl & RCANFD_GERFL_EEF(ch)) { + if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) { netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch); stats->tx_dropped++; } @@ -1304,7 +1441,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) struct rcar_canfd_channel *priv = netdev_priv(dev); struct rcar_canfd_global *gpriv = priv->gpriv; const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 cfg; u32 ch = priv->channel; @@ -1338,7 +1475,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev) brp, sjw, tseg1, tseg2); } else { /* Classical CAN only mode */ - if (is_gen4(gpriv)) 
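rcar_canfd_setrnc() above replaces the RCANFD_GAFLCFG_SETRNC/GETRNC macros: the per-generation differences move into rcar_canfd_hw_info, and the position of a channel's rule-number (RNC) field is derived from rnc_field_width alone — 32/width fields per GAFLCFG word, filled from the most significant field down. A standalone sketch of that packing arithmetic (the rule count is arbitrary):

#include <stdio.h>

static void setrnc(unsigned int field_width, unsigned int ch,
		   unsigned int num_rules)
{
	unsigned int stride = 32 / field_width;		/* fields per word */
	unsigned int shift = 32 - (ch % stride + 1) * field_width;
	unsigned int word = ch / stride;

	printf("width=%2u ch=%u -> GAFLCFG%u |= 0x%08x\n",
	       field_width, ch, word, num_rules << shift);
}

int main(void)
{
	setrnc(8, 0, 16);	/* Gen3-style: bits 31..24 of word 0 */
	setrnc(8, 1, 16);	/* Gen3-style: bits 23..16 of word 0 */
	setrnc(16, 2, 16);	/* Gen4-style: bits 31..16 of word 1 */
	setrnc(16, 3, 16);	/* Gen4-style: bits 15..0  of word 1 */
	return 0;
}

For the two-channel Gen3 parts this reproduces the old 24/16 shifts, and for Gen4's 16-bit fields it lands on bits 16 and 0 of each word, so behaviour is unchanged while now scaling to up to eight channels.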
{ + if (gpriv->info->shared_can_regs) { cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) | RCANFD_NCFG_NSJW(gpriv, sjw) | @@ -1503,7 +1640,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len)); - if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) { rcar_canfd_write(priv->base, RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id); rcar_canfd_write(priv->base, @@ -1562,7 +1699,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) { id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx)); dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx)); @@ -1613,7 +1750,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (id & RCANFD_RFID_RFRTR) cf->can_id |= CAN_RTR_FLAG; - else if (is_gen4(gpriv)) + else if (gpriv->info->shared_can_regs) rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); else rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0)); @@ -1736,16 +1873,19 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, if (info->multi_channel_irqs) { char *irq_name; + char name[10]; int err_irq; int tx_irq; - err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err"); + scnprintf(name, sizeof(name), "ch%u_err", ch); + err_irq = platform_get_irq_byname(pdev, name); if (err_irq < 0) { err = err_irq; goto fail; } - tx_irq = platform_get_irq_byname(pdev, ch == 0 ? 
"ch0_trx" : "ch1_trx"); + scnprintf(name, sizeof(name), "ch%u_trx", ch); + tx_irq = platform_get_irq_byname(pdev, name); if (tx_irq < 0) { err = tx_irq; goto fail; @@ -1782,9 +1922,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, } if (gpriv->fdmode) { - priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const; - priv->can.data_bittiming_const = - &rcar_canfd_data_bittiming_const; + priv->can.bittiming_const = gpriv->info->nom_bittiming; + priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming; /* Controller starts in CAN FD only mode */ err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); @@ -1846,6 +1985,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) u32 rule_entry = 0; bool fdmode = true; /* CAN FD only mode - default */ char name[9] = "channelX"; + struct clk *clk_ram; int i; info = of_device_get_match_data(dev); @@ -1855,13 +1995,13 @@ static int rcar_canfd_probe(struct platform_device *pdev) for (i = 0; i < info->max_channels; ++i) { name[7] = '0' + i; - of_child = of_get_child_by_name(dev->of_node, name); - if (of_child && of_device_is_available(of_child)) { + of_child = of_get_available_child_by_name(dev->of_node, name); + if (of_child) { channels_mask |= BIT(i); transceivers[i] = devm_of_phy_optional_get(dev, of_child, NULL); + of_node_put(of_child); } - of_node_put(of_child); if (IS_ERR(transceivers[i])) return PTR_ERR(transceivers[i]); } @@ -1932,9 +2072,14 @@ static int rcar_canfd_probe(struct platform_device *pdev) fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv; } else { fcan_freq = clk_get_rate(gpriv->can_clk); - gpriv->extclk = true; + gpriv->extclk = gpriv->info->external_clk; } + clk_ram = devm_clk_get_optional_enabled(dev, "ram_clk"); + if (IS_ERR(clk_ram)) + return dev_err_probe(dev, PTR_ERR(clk_ram), + "cannot get enabled ram clock\n"); + addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); @@ -2097,6 +2242,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = { { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info }, + { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info }, { .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info }, { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info }, { .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info }, diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index c3fb3176ce42..046f0a0ae4d4 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -118,7 +118,7 @@ static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv) static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv) { - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; const struct can_bittiming *bt = &priv->can.bittiming; u32 reg_nbt, reg_dbt, reg_tdc; u32 tdco; @@ -899,7 +899,7 @@ static int rkcanfd_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->can.clock.freq = clk_get_rate(priv->clks[0].clk); priv->can.bittiming_const = &rkcanfd_bittiming_const; - priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const; + priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; 
priv->can.do_set_mode = rkcanfd_set_mode; diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c index 43d4b5721812..fa85a75be65a 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c +++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c @@ -39,7 +39,7 @@ static void rkcanfd_timestamp_work(struct work_struct *work) void rkcanfd_timestamp_init(struct rkcanfd_priv *priv) { - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; const struct can_bittiming *bt = &priv->can.bittiming; struct cyclecounter *cc = &priv->cc; u32 bitrate, div, reg, rate; diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 24c6622d36bd..58ff2ec1d975 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 -#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ - SLCAN_STATE_BE_RXCNT_LEN + \ - SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) struct slcan { struct can_priv can; @@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; + if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; @@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) return; } - if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; @@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; + if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') @@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 4) + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) slcan_bump(sl); sl->rcount = 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index c30b04f8fc0d..7450ea42c1ea 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -527,7 +527,7 @@ static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv) static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) { const struct can_bittiming *bt = &priv->can.bittiming; - const struct can_bittiming *dbt = &priv->can.data_bittiming; + const struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u32 tdcmod, val = 0; int err; @@ -600,8 +600,8 @@ static int mcp251xfd_set_bittiming(const struct 
mcp251xfd_priv *priv) tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | - FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | - FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco); return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); } @@ -2104,8 +2104,8 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_set_mode = mcp251xfd_set_mode; priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; - priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; - priv->can.tdc_const = &mcp251xfd_tdc_const; + priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const; + priv->can.fd.tdc_const = &mcp251xfd_tdc_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 03ad10b01867..27a3818885c2 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -1098,7 +1098,7 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev) const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const; struct esd_usb_net_priv *priv = netdev_priv(netdev); struct can_bittiming *nom_bt = &priv->can.bittiming; - struct can_bittiming *data_bt = &priv->can.data_bittiming; + struct can_bittiming *data_bt = &priv->can.fd.data_bittiming; struct esd_usb_3_set_baudrate_msg_x *baud_x; union esd_usb_msg *msg; u16 flags = 0; @@ -1218,9 +1218,9 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; - priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; + priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const; priv->can.do_set_bittiming = esd_usb_3_set_bittiming; - priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming; + priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming; break; case ESD_USB_CANUSBM_PRODUCT_ID: diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 71f24dc0a927..db1acf6d504c 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -2059,8 +2059,8 @@ static int es58x_init_priv(struct es58x_device *es58x_dev, can->bittiming_const = param->bittiming_const; if (param->ctrlmode_supported & CAN_CTRLMODE_FD) { - can->data_bittiming_const = param->data_bittiming_const; - can->tdc_const = param->tdc_const; + can->fd.data_bittiming_const = param->data_bittiming_const; + can->fd.tdc_const = param->tdc_const; } can->bitrate_max = param->bitrate_max; can->clock = param->clock; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index 84ffa1839bac..d924b053677b 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -427,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv) if (tx_conf_msg.canfd_enabled) { es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, - &priv->can.data_bittiming); + &priv->can.fd.data_bittiming); if (can_tdc_is_enabled(&priv->can)) { 
tx_conf_msg.tdc_enabled = 1; - tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); - tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); + tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco); + tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf); } conf_len = ES58X_FD_CANFD_CONF_LEN; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 3ccac6781b98..bb6335278e46 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -728,7 +728,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) static int gs_usb_set_data_bittiming(struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); - struct can_bittiming *bt = &dev->can.data_bittiming; + struct can_bittiming *bt = &dev->can.fd.data_bittiming; struct gs_device_bittiming dbt = { .prop_seg = cpu_to_le32(bt->prop_seg), .phase_seg1 = cpu_to_le32(bt->phase_seg1), @@ -1300,8 +1300,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, /* The data bit timing will be overwritten, if * GS_CAN_FEATURE_BT_CONST_EXT is set. */ - dev->can.data_bittiming_const = &dev->bt_const; - dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming; + dev->can.fd.data_bittiming_const = &dev->bt_const; + dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming; } if (feature & GS_CAN_FEATURE_TERMINATION) { @@ -1381,7 +1381,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max); dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc); - dev->can.data_bittiming_const = &dev->data_bt_const; + dev->can.fd.data_bittiming_const = &dev->data_bt_const; } can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 078496d9b7ba..f6c77eca9f43 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -137,7 +137,7 @@ struct kvaser_usb_net_priv { * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming * @dev_get_busparams: readback arbitration busparams - * @dev_set_data_bittiming: used for can.do_set_data_bittiming + * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming * @dev_get_data_busparams: readback data busparams * @dev_get_berr_counter: used for can.do_get_berr_counter * diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index dcb0bcbe0565..daf42080f942 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -592,7 +592,7 @@ static int kvaser_usb_set_data_bittiming(struct net_device *netdev) struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; - struct can_bittiming *dbt = &priv->can.data_bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; struct kvaser_usb_busparams busparams; int tseg1 = dbt->prop_seg + dbt->phase_seg1; int tseg2 = dbt->phase_seg2; @@ -842,8 +842,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { - priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; + priv->can.fd.data_bittiming_const = 
dev->cfg->data_bittiming_const; + priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 59f7cd8ceb39..117637b9b995 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -770,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev) const struct peak_usb_adapter *pa = dev->adapter; if (pa->dev_set_data_bittiming) { - struct can_bittiming *bt = &dev->can.data_bittiming; + struct can_bittiming *bt = &dev->can.fd.data_bittiming; int err = pa->dev_set_data_bittiming(dev, bt); if (err) @@ -954,8 +954,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; - dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const; - dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming; + dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const; + dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter; dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 436c0e4b0344..3f2e378199ab 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -481,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; - struct can_bittiming *dbt = &priv->can.data_bittiming; + struct can_bittiming *dbt = &priv->can.fd.data_bittiming; u32 btr0, btr1; u32 is_config_mode; @@ -517,10 +517,10 @@ static int xcan_set_bittiming(struct net_device *ndev) btr0 = dbt->brp - 1; if (can_tdc_is_enabled(&priv->can)) { if (priv->devtype.cantype == XAXI_CANFD) - btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) | + btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) | XCAN_BRPR_TDC_ENABLE; else - btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) | + btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) | XCAN_BRPR_TDC_ENABLE; } @@ -1967,22 +1967,22 @@ static int xcan_probe(struct platform_device *pdev) goto err_free; if (devtype->cantype == XAXI_CANFD) { - priv->can.data_bittiming_const = + priv->can.fd.data_bittiming_const = &xcan_data_bittiming_const_canfd; - priv->can.tdc_const = &xcan_tdc_const_canfd; + priv->can.fd.tdc_const = &xcan_tdc_const_canfd; } if (devtype->cantype == XAXI_CANFD_2_0) { - priv->can.data_bittiming_const = + priv->can.fd.data_bittiming_const = &xcan_data_bittiming_const_canfd2; - priv->can.tdc_const = &xcan_tdc_const_canfd2; + priv->can.fd.tdc_const = &xcan_tdc_const_canfd2; } if (devtype->cantype == XAXI_CANFD || devtype->cantype == XAXI_CANFD_2_0) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO; - priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv; + priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv; } priv->reg_base = addr; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index adbab544c60f..d8a35f25a4c8 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -405,7 +405,7 @@ static int __init dsa_loop_init(void) unsigned int i, ret; 
for (i = 0; i < NUM_FIXED_PHYS; i++) - phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL); + phydevs[i] = fixed_phy_register(&status, NULL); ret = mdio_driver_register(&dsa_loop_drv); if (ret) diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index 12a86585a77f..c71d3fd5dfeb 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -6,6 +6,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON select NET_DSA_TAG_NONE select NET_IEEE8021Q_HELPERS select DCB + select PCS_XPCS help This driver adds support for Microchip KSZ8, KSZ9 and LAN937X series switch chips, being KSZ8863/8873, diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 29fe79ea74cd..d747ea1c41a7 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 switch driver main logic * - * Copyright (C) 2017-2024 Microchip Technology Inc. + * Copyright (C) 2017-2025 Microchip Technology Inc. */ #include <linux/kernel.h> @@ -161,6 +161,190 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev) 10, 1000); } +static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg) +{ + u32 data; + + data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16; + data |= reg; + ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data); +} + +static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg, + u16 *buf) +{ + port_sgmii_s(dev, port, devid, reg); + ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf); +} + +static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg, + u16 buf) +{ + port_sgmii_s(dev, port, devid, reg); + ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf); +} + +static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg) +{ + struct ksz_device *dev = bus->priv; + int port = ksz_get_sgmii_port(dev); + u16 val; + + port_sgmii_r(dev, port, mmd, reg, &val); + + /* Simulate a value to activate special code in the XPCS driver if + * supported. + */ + if (mmd == MDIO_MMD_PMAPMD) { + if (reg == MDIO_DEVID1) + val = 0x9477; + else if (reg == MDIO_DEVID2) + val = 0x22 << 10; + } else if (mmd == MDIO_MMD_VEND2) { + struct ksz_port *p = &dev->ports[port]; + + /* Need to update MII_BMCR register with the exact speed and + * duplex mode when running in SGMII mode and this register is + * used to detect connected speed in that mode. 
+ */ + if (reg == MMD_SR_MII_AUTO_NEG_STATUS) { + int duplex, speed; + + if (val & SR_MII_STAT_LINK_UP) { + speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M; + if (speed == SR_MII_STAT_1000_MBPS) + speed = SPEED_1000; + else if (speed == SR_MII_STAT_100_MBPS) + speed = SPEED_100; + else + speed = SPEED_10; + + if (val & SR_MII_STAT_FULL_DUPLEX) + duplex = DUPLEX_FULL; + else + duplex = DUPLEX_HALF; + + if (!p->phydev.link || + p->phydev.speed != speed || + p->phydev.duplex != duplex) { + u16 ctrl; + + p->phydev.link = 1; + p->phydev.speed = speed; + p->phydev.duplex = duplex; + port_sgmii_r(dev, port, mmd, MII_BMCR, + &ctrl); + ctrl &= BMCR_ANENABLE; + ctrl |= mii_bmcr_encode_fixed(speed, + duplex); + port_sgmii_w(dev, port, mmd, MII_BMCR, + ctrl); + } + } else { + p->phydev.link = 0; + } + } else if (reg == MII_BMSR) { + p->phydev.link = (val & BMSR_LSTATUS); + } + } + + return val; +} + +static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg, + u16 val) +{ + struct ksz_device *dev = bus->priv; + int port = ksz_get_sgmii_port(dev); + + if (mmd == MDIO_MMD_VEND2) { + struct ksz_port *p = &dev->ports[port]; + + if (reg == MMD_SR_MII_AUTO_NEG_CTRL) { + u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S; + + /* Need these bits for 1000BASE-X mode to work with + * AN on. + */ + if (!(val & sgmii_mode)) + val |= SR_MII_SGMII_LINK_UP | + SR_MII_TX_CFG_PHY_MASTER; + + /* SGMII interrupt in the port cannot be masked, so + * make sure interrupt is not enabled as it is not + * handled. + */ + val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR; + } else if (reg == MII_BMCR) { + /* The MII_ADVERTISE register needs to write once + * before doing auto-negotiation for the correct + * config_word to be sent out after reset. + */ + if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) { + u16 adv; + + /* The SGMII port cannot disable flow control + * so it is better to just advertise symmetric + * pause. + */ + port_sgmii_r(dev, port, mmd, MII_ADVERTISE, + &adv); + adv |= ADVERTISE_1000XPAUSE; + adv &= ~ADVERTISE_1000XPSE_ASYM; + port_sgmii_w(dev, port, mmd, MII_ADVERTISE, + adv); + p->sgmii_adv_write = 1; + } else if (val & BMCR_RESET) { + p->sgmii_adv_write = 0; + } + } else if (reg == MII_ADVERTISE) { + /* XPCS driver writes to this register so there is no + * need to update it for the errata. + */ + p->sgmii_adv_write = 1; + } + } + port_sgmii_w(dev, port, mmd, reg, val); + + return 0; +} + +int ksz9477_pcs_create(struct ksz_device *dev) +{ + /* This chip has a SGMII port. 
*/ + if (ksz_has_sgmii_port(dev)) { + int port = ksz_get_sgmii_port(dev); + struct ksz_port *p = &dev->ports[port]; + struct phylink_pcs *pcs; + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(dev->dev); + if (!bus) + return -ENOMEM; + + bus->name = "ksz_pcs_mdio_bus"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs", + dev_name(dev->dev)); + bus->read_c45 = &ksz9477_pcs_read; + bus->write_c45 = &ksz9477_pcs_write; + bus->parent = dev->dev; + bus->phy_mask = ~0; + bus->priv = dev; + + ret = devm_mdiobus_register(dev->dev, bus); + if (ret) + return ret; + + pcs = xpcs_create_pcs_mdiodev(bus, 0); + if (IS_ERR(pcs)) + return PTR_ERR(pcs); + p->pcs = pcs; + } + + return 0; +} + int ksz9477_reset_switch(struct ksz_device *dev) { u8 data8; @@ -978,6 +1162,14 @@ void ksz9477_get_caps(struct ksz_device *dev, int port, if (dev->info->gbit_capable[port]) config->mac_capabilities |= MAC_1000FD; + + if (ksz_is_sgmii_port(dev, port)) { + struct ksz_port *p = &dev->ports[port]; + + phy_interface_or(config->supported_interfaces, + config->supported_interfaces, + p->pcs->supported_interfaces); + } } int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs) diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h index d2166b0d881e..0d1a6dfda23e 100644 --- a/drivers/net/dsa/microchip/ksz9477.h +++ b/drivers/net/dsa/microchip/ksz9477.h @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 series Header file * - * Copyright (C) 2017-2022 Microchip Technology Inc. + * Copyright (C) 2017-2025 Microchip Technology Inc. */ #ifndef __KSZ9477_H @@ -97,4 +97,6 @@ void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port, u16 ethtype, u8 *src_mac, u8 *dst_mac, unsigned long cookie, u32 prio); +int ksz9477_pcs_create(struct ksz_device *dev); + #endif diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index f492fa9f6dd4..f424b3d10781 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2,7 +2,7 @@ /* * Microchip switch driver main logic * - * Copyright (C) 2017-2024 Microchip Technology Inc. + * Copyright (C) 2017-2025 Microchip Technology Inc. 
*/ #include <linux/delay.h> @@ -408,12 +408,29 @@ static void ksz9477_phylink_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause); +static struct phylink_pcs * +ksz_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + struct ksz_port *p = &dev->ports[dp->index]; + + if (ksz_is_sgmii_port(dev, dp->index) && + (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX)) + return p->pcs; + + return NULL; +} + static const struct phylink_mac_ops ksz9477_phylink_mac_ops = { .mac_config = ksz_phylink_mac_config, .mac_link_down = ksz_phylink_mac_link_down, .mac_link_up = ksz9477_phylink_mac_link_up, .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi, .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi, + .mac_select_pcs = ksz_phylink_mac_select_pcs, }; static const struct ksz_dev_ops ksz9477_dev_ops = { @@ -451,6 +468,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .reset = ksz9477_reset_switch, .init = ksz9477_switch_init, .exit = ksz9477_switch_exit, + .pcs_create = ksz9477_pcs_create, }; static const struct phylink_mac_ops lan937x_phylink_mac_ops = { @@ -1093,8 +1111,7 @@ static const struct regmap_range ksz9477_valid_regs[] = { regmap_reg_range(0x701b, 0x701b), regmap_reg_range(0x701f, 0x7020), regmap_reg_range(0x7030, 0x7030), - regmap_reg_range(0x7200, 0x7203), - regmap_reg_range(0x7206, 0x7207), + regmap_reg_range(0x7200, 0x7207), regmap_reg_range(0x7300, 0x7301), regmap_reg_range(0x7400, 0x7401), regmap_reg_range(0x7403, 0x7403), @@ -1610,6 +1627,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { true, false, false}, .gbit_capable = {true, true, true, true, true, true, true}, .ptp_capable = true, + .sgmii_port = 7, .wr_table = &ksz9477_register_set, .rd_table = &ksz9477_register_set, }, @@ -2002,6 +2020,7 @@ const struct ksz_chip_data ksz_switch_chips[] = { .internal_phy = {true, true, true, true, true, false, false}, .gbit_capable = {true, true, true, true, true, true, true}, + .sgmii_port = 7, .wr_table = &ksz9477_register_set, .rd_table = &ksz9477_register_set, }, @@ -2137,7 +2156,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port) spin_unlock(&mib->stats64_lock); - if (dev->info->phy_errata_9477) { + if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) { ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col); if (ret) dev_err(dev->dev, "Failed to monitor transmission halt\n"); @@ -2845,6 +2864,12 @@ static int ksz_setup(struct dsa_switch *ds) if (ret) return ret; + if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) { + ret = dev->dev_ops->pcs_create(dev); + if (ret) + return ret; + } + /* set broadcast storm protection 10% rate */ regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL], BROADCAST_STORM_RATE, @@ -3692,6 +3717,10 @@ static void ksz_phylink_mac_config(struct phylink_config *config, if (dev->info->internal_phy[port]) return; + /* No need to configure XMII control register when using SGMII. 
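The helpers added to ksz_common.h below encode the SGMII capability as a one-based port number in ksz_chip_data: sgmii_port == 0 means no SGMII port, and the KSZ9477-class entries use .sgmii_port = 7 for port index 6, which is why ksz_is_sgmii_port() compares against port + 1. A tiny standalone illustration of that convention (the chip names here are placeholders):

#include <stdbool.h>
#include <stdio.h>

struct chip_info { unsigned int sgmii_port; };	/* 0 = no SGMII port */

static bool has_sgmii(const struct chip_info *info)
{
	return info->sgmii_port > 0;
}

static bool is_sgmii(const struct chip_info *info, int port)
{
	return info->sgmii_port == (unsigned int)port + 1;
}

int main(void)
{
	struct chip_info with_sgmii = { .sgmii_port = 7 };	/* port index 6 */
	struct chip_info without = { .sgmii_port = 0 };

	printf("port 6 on SGMII-capable chip: %d\n", is_sgmii(&with_sgmii, 6));
	printf("any port on plain chip:       %d\n", has_sgmii(&without));
	return 0;
}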
*/ + if (ksz_is_sgmii_port(dev, port)) + return; + if (phylink_autoneg_inband(mode)) { dev_err(dev->dev, "In-band AN not supported!\n"); return; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a034017568cd..a08417df2ca4 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Microchip switch driver common header * - * Copyright (C) 2017-2024 Microchip Technology Inc. + * Copyright (C) 2017-2025 Microchip Technology Inc. */ #ifndef __KSZ_COMMON_H @@ -10,6 +10,7 @@ #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/mutex.h> +#include <linux/pcs/pcs-xpcs.h> #include <linux/phy.h> #include <linux/regmap.h> #include <net/dsa.h> @@ -93,6 +94,7 @@ struct ksz_chip_data { bool internal_phy[KSZ_MAX_NUM_PORTS]; bool gbit_capable[KSZ_MAX_NUM_PORTS]; bool ptp_capable; + u8 sgmii_port; const struct regmap_access_table *wr_table; const struct regmap_access_table *rd_table; }; @@ -132,6 +134,7 @@ struct ksz_port { u32 force:1; u32 read:1; /* read MIB counters in background */ u32 freeze:1; /* MIB counter freeze is enabled */ + u32 sgmii_adv_write:1; struct ksz_port_mib mib; phy_interface_t interface; @@ -141,6 +144,7 @@ struct ksz_port { void *acl_priv; struct ksz_irq pirq; u8 num; + struct phylink_pcs *pcs; #if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP) struct kernel_hwtstamp_config tstamp_config; bool hwts_tx_en; @@ -440,6 +444,8 @@ struct ksz_dev_ops { int (*reset)(struct ksz_device *dev); int (*init)(struct ksz_device *dev); void (*exit)(struct ksz_device *dev); + + int (*pcs_create)(struct ksz_device *dev); }; struct ksz_device *ksz_switch_alloc(struct device *base, void *priv); @@ -731,6 +737,21 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port) dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4; } +static inline int ksz_get_sgmii_port(struct ksz_device *dev) +{ + return dev->info->sgmii_port - 1; +} + +static inline bool ksz_has_sgmii_port(struct ksz_device *dev) +{ + return dev->info->sgmii_port > 0; +} + +static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port) +{ + return dev->info->sgmii_port == port + 1; +} + /* STP State Defines */ #define PORT_TX_ENABLE BIT(2) #define PORT_RX_ENABLE BIT(1) diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig index 1a4cf6a259f6..ad3ce501e7a5 100644 --- a/drivers/net/ethernet/airoha/Kconfig +++ b/drivers/net/ethernet/airoha/Kconfig @@ -24,4 +24,11 @@ config NET_AIROHA This driver supports the gigabit ethernet MACs in the Airoha SoC family. +config NET_AIROHA_FLOW_STATS + default y + bool "Airoha flow stats" + depends on NET_AIROHA && NET_AIROHA_NPU + help + Enable Aiorha flowtable statistic counters. + endif #NET_VENDOR_AIROHA diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 16c7896f931f..507ba046c719 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -5,6 +5,7 @@ */ #include <linux/of.h> #include <linux/of_net.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/tcp.h> #include <linux/u64_stats_sync.h> @@ -70,15 +71,6 @@ static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank, airoha_qdma_set_irqmask(irq_bank, index, mask, 0); } -static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) -{ - /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. 
- * GDM{2,3,4} can be used as wan port connected to an external - * phy module. - */ - return port->id == 1; -} - static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr) { struct airoha_eth *eth = port->qdma->eth; @@ -636,7 +628,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) struct airoha_queue_entry *e = &q->entry[q->tail]; struct airoha_qdma_desc *desc = &q->desc[q->tail]; u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); - dma_addr_t dma_addr = le32_to_cpu(desc->addr); struct page *page = virt_to_head_page(e->buf); u32 desc_ctrl = le32_to_cpu(desc->ctrl); struct airoha_gdm_port *port; @@ -645,22 +636,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) break; - if (!dma_addr) - break; - - len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); - if (!len) - break; - q->tail = (q->tail + 1) % q->ndesc; q->queued--; - dma_sync_single_for_cpu(eth->dev, dma_addr, + dma_sync_single_for_cpu(eth->dev, e->dma_addr, SKB_WITH_OVERHEAD(q->buf_size), dir); + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); data_len = q->skb ? q->buf_size : SKB_WITH_OVERHEAD(q->buf_size); - if (data_len < len) + if (!len || data_len < len) goto free_frag; p = airoha_qdma_get_gdm_port(eth, desc); @@ -723,9 +708,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) q->skb = NULL; continue; free_frag: - page_pool_put_full_page(q->page_pool, page, true); - dev_kfree_skb(q->skb); - q->skb = NULL; + if (q->skb) { + dev_kfree_skb(q->skb); + q->skb = NULL; + } else { + page_pool_put_full_page(q->page_pool, page, true); + } } airoha_qdma_fill_rx_queue(q); @@ -1076,24 +1064,46 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) { struct airoha_eth *eth = qdma->eth; + int id = qdma - ð->qdma[0]; dma_addr_t dma_addr; + const char *name; + int size, index; u32 status; - int size; size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); - qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!qdma->hfwd.desc) + if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL)) return -ENOMEM; airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); - size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; - qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!qdma->hfwd.q) + name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id); + if (!name) return -ENOMEM; + index = of_property_match_string(eth->dev->of_node, + "memory-region-names", name); + if (index >= 0) { + struct reserved_mem *rmem; + struct device_node *np; + + /* Consume reserved memory for hw forwarding buffers queue if + * available in the DTS + */ + np = of_parse_phandle(eth->dev->of_node, "memory-region", + index); + if (!np) + return -ENODEV; + + rmem = of_reserved_mem_lookup(np); + of_node_put(np); + dma_addr = rmem->base; + } else { + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; + if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL)) + return -ENOMEM; + } + airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, @@ -1105,7 +1115,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) LMGR_INIT_START | LMGR_SRAM_MODE_MASK | HW_FWD_DESC_NUM_MASK, FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | - LMGR_INIT_START); + LMGR_INIT_START | LMGR_SRAM_MODE_MASK); return read_poll_timeout(airoha_qdma_rr, status, !(status & LMGR_INIT_START), USEC_PER_MSEC, 
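[Editor's note, not part of the diff] airoha_qdma_init_hfwd_queues() above now prefers a named reserved-memory carveout for the hardware forwarding buffers and only falls back to a coherent allocation when the DTS does not provide one. A self-contained sketch of that lookup-or-allocate pattern; the helper name and error handling are illustrative, not taken from the driver:

/* Illustrative only: resolve a named reserved-memory region, or fall
 * back to a managed coherent allocation when the region is absent.
 * Needs <linux/of.h>, <linux/of_reserved_mem.h>, <linux/dma-mapping.h>.
 */
static int example_get_hw_buf(struct device *dev, const char *name,
			      size_t size, dma_addr_t *dma_addr)
{
	int index;

	index = of_property_match_string(dev->of_node,
					 "memory-region-names", name);
	if (index >= 0) {
		struct reserved_mem *rmem;
		struct device_node *np;

		np = of_parse_phandle(dev->of_node, "memory-region", index);
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		if (!rmem)
			return -ENODEV;

		*dma_addr = rmem->base;
		return 0;
	}

	if (!dmam_alloc_coherent(dev, size, dma_addr, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}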
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index 53f39083a8b0..b815697302bf 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -50,6 +50,14 @@ #define PPE_NUM 2 #define PPE1_SRAM_NUM_ENTRIES (8 * 1024) #define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) +#ifdef CONFIG_NET_AIROHA_FLOW_STATS +#define PPE1_STATS_NUM_ENTRIES (4 * 1024) +#else +#define PPE1_STATS_NUM_ENTRIES 0 +#endif /* CONFIG_NET_AIROHA_FLOW_STATS */ +#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) +#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) +#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) #define PPE_DRAM_NUM_ENTRIES (16 * 1024) #define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) #define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) @@ -261,6 +269,8 @@ struct airoha_foe_mac_info { u16 pppoe_id; u16 src_mac_lo; + + u32 meter; }; #define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24) @@ -296,6 +306,11 @@ struct airoha_foe_mac_info { #define AIROHA_FOE_TUNNEL BIT(6) #define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0) +#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16) +#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9) +#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5) +#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0) + struct airoha_foe_bridge { u32 dest_mac_hi; @@ -379,6 +394,8 @@ struct airoha_foe_ipv6 { u32 ib2; struct airoha_foe_mac_info_common l2; + + u32 meter; }; struct airoha_foe_entry { @@ -397,6 +414,16 @@ struct airoha_foe_entry { }; }; +struct airoha_foe_stats { + u32 bytes; + u32 packets; +}; + +struct airoha_foe_stats64 { + u64 bytes; + u64 packets; +}; + struct airoha_flow_data { struct ethhdr eth; @@ -447,6 +474,7 @@ struct airoha_flow_table_entry { struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */ u32 hash; + struct airoha_foe_stats64 stats; enum airoha_flow_entry_type type; struct rhash_head node; @@ -485,12 +513,6 @@ struct airoha_qdma { struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; - - /* descriptor and packet buffers for qdma hw forward */ - struct { - void *desc; - void *q; - } hfwd; }; struct airoha_gdm_port { @@ -523,6 +545,9 @@ struct airoha_ppe { struct hlist_head *foe_flow; u16 foe_check_time[PPE_NUM_ENTRIES]; + struct airoha_foe_stats *foe_stats; + dma_addr_t foe_stats_dma; + struct dentry *debugfs_dir; }; @@ -572,6 +597,15 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val); #define airoha_qdma_clear(qdma, offset, val) \ airoha_rmw((qdma)->regs, (offset), (val), 0) +static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) +{ + /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. + * GDM{2,3,4} can be used as wan port connected to an external + * phy module. 
+ */ + return port->id == 1; +} + bool airoha_is_valid_gdm_port(struct airoha_eth *eth, struct airoha_gdm_port *port); @@ -582,6 +616,8 @@ int airoha_ppe_init(struct airoha_eth *eth); void airoha_ppe_deinit(struct airoha_eth *eth); struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash); +void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, + struct airoha_foe_stats64 *stats); #ifdef CONFIG_DEBUG_FS int airoha_ppe_debugfs_init(struct airoha_ppe *ppe); diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index ead0625e781f..0e5b8c21b9aa 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -12,6 +12,7 @@ #include <linux/of_reserved_mem.h> #include <linux/regmap.h> +#include "airoha_eth.h" #include "airoha_npu.h" #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" @@ -72,6 +73,7 @@ enum { PPE_FUNC_SET_WAIT_HWNAT_INIT, PPE_FUNC_SET_WAIT_HWNAT_DEINIT, PPE_FUNC_SET_WAIT_API, + PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP, }; enum { @@ -115,6 +117,10 @@ struct ppe_mbox_data { u32 size; u32 data; } set_info; + struct { + u32 npu_stats_addr; + u32 foe_stats_addr; + } stats_info; }; }; @@ -124,17 +130,12 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, u16 core = 0; /* FIXME */ u32 val, offset = core << 4; dma_addr_t dma_addr; - void *addr; int ret; - addr = kmemdup(p, size, GFP_ATOMIC); - if (!addr) - return -ENOMEM; - - dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE); + dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE); ret = dma_mapping_error(npu->dev, dma_addr); if (ret) - goto out; + return ret; spin_lock_bh(&npu->cores[core].lock); @@ -155,8 +156,6 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, spin_unlock_bh(&npu->cores[core].lock); dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE); -out: - kfree(addr); return ret; } @@ -261,79 +260,137 @@ static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance) static int airoha_npu_ppe_init(struct airoha_npu *npu) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT, - .init_info = { - .ppe_type = PPE_TYPE_L2B_IPV4_IPV6, - .wan_mode = QDMA_WAN_ETHER, - }, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT; + ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6; + ppe_data->init_info.wan_mode = QDMA_WAN_ETHER; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); + + return err; } static int airoha_npu_ppe_deinit(struct airoha_npu *npu) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); + + return err; } static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu, dma_addr_t foe_addr, 
int sram_num_entries) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_API, - .set_info = { - .func_id = PPE_SRAM_RESET_VAL, - .data = foe_addr, - .size = sram_num_entries, - }, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_API; + ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL; + ppe_data->set_info.data = foe_addr; + ppe_data->set_info.size = sram_num_entries; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + return err; } static int airoha_npu_foe_commit_entry(struct airoha_npu *npu, dma_addr_t foe_addr, u32 entry_size, u32 hash, bool ppe2) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_API, - .set_info = { - .data = foe_addr, - .size = entry_size, - }, - }; + struct ppe_mbox_data *ppe_data; int err; - ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY - : PPE_SRAM_SET_ENTRY; + ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_API; + ppe_data->set_info.data = foe_addr; + ppe_data->set_info.size = entry_size; + ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY + : PPE_SRAM_SET_ENTRY; - err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); if (err) - return err; + goto out; + + ppe_data->set_info.func_id = PPE_SRAM_SET_VAL; + ppe_data->set_info.data = hash; + ppe_data->set_info.size = sizeof(u32); - ppe_data.set_info.func_id = PPE_SRAM_SET_VAL; - ppe_data.set_info.data = hash; - ppe_data.set_info.size = sizeof(u32); + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); +out: + kfree(ppe_data); - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + return err; } -struct airoha_npu *airoha_npu_get(struct device *dev) +static int airoha_npu_stats_setup(struct airoha_npu *npu, + dma_addr_t foe_stats_addr) +{ + int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats); + struct ppe_mbox_data *ppe_data; + + if (!size) /* flow stats are disabled */ + return 0; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP; + ppe_data->stats_info.foe_stats_addr = foe_stats_addr; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + if (err) + goto out; + + npu->stats = devm_ioremap(npu->dev, + ppe_data->stats_info.npu_stats_addr, + size); + if (!npu->stats) + err = -ENOMEM; +out: + kfree(ppe_data); + + return err; +} + +struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr) { struct platform_device *pdev; struct device_node *np; @@ -371,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct device *dev) goto error_module_put; } + if (stats_addr) { + int err; + + err = airoha_npu_stats_setup(npu, *stats_addr); + if (err) { + dev_err(dev, "failed to allocate npu stats buffer\n"); + npu = ERR_PTR(err); + goto error_module_put; + } + } + return npu; error_module_put: diff --git a/drivers/net/ethernet/airoha/airoha_npu.h 
b/drivers/net/ethernet/airoha/airoha_npu.h index a2b8ae4d9473..98ec3be74ce4 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.h +++ b/drivers/net/ethernet/airoha/airoha_npu.h @@ -17,6 +17,8 @@ struct airoha_npu { struct work_struct wdt_work; } cores[NPU_NUM_CORES]; + struct airoha_foe_stats __iomem *stats; + struct { int (*ppe_init)(struct airoha_npu *npu); int (*ppe_deinit)(struct airoha_npu *npu); @@ -30,5 +32,5 @@ struct airoha_npu { } ops; }; -struct airoha_npu *airoha_npu_get(struct device *dev); +struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr); void airoha_npu_put(struct airoha_npu *npu); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index 6e9787c2843b..12d32c92717a 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -84,6 +84,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), PPE_TB_CFG_SEARCH_MISS_MASK | + PPE_TB_CFG_KEEPALIVE_MASK | PPE_TB_ENTRY_SIZE_MASK, FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); @@ -102,7 +103,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) if (airoha_ppe2_is_enabled(eth)) { sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES); + PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), PPE_SRAM_TB_NUM_ENTRY_MASK | PPE_DRAM_TB_NUM_ENTRY_MASK, @@ -119,7 +120,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) dram_num_entries)); } else { sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES); + PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), PPE_SRAM_TB_NUM_ENTRY_MASK | PPE_DRAM_TB_NUM_ENTRY_MASK, @@ -250,6 +251,12 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth, else pse_port = 2; /* uplink relies on GDM2 loopback */ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port); + + /* For downlink traffic consume SRAM memory for hw forwarding + * descriptors queue. + */ + if (airhoa_is_lan_gdm_port(port)) + val |= AIROHA_FOE_IB2_FAST_PATH; } if (is_multicast_ether_addr(data->eth.h_dest)) @@ -417,6 +424,77 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) return hash; } +static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) +{ + if (!airoha_ppe2_is_enabled(ppe->eth)) + return hash; + + return hash >= PPE_STATS_NUM_ENTRIES ? 
hash - PPE1_STATS_NUM_ENTRIES + : hash; +} + +static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, + struct airoha_npu *npu, + int index) +{ + memset_io(&npu->stats[index], 0, sizeof(*npu->stats)); + memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats)); +} + +static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, + struct airoha_npu *npu) +{ + int i; + + for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); +} + +static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, + struct airoha_npu *npu, + struct airoha_foe_entry *hwe, + u32 hash) +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 index, pse_port, val, *data, *ib2, *meter; + u8 nbq; + + index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); + if (index >= PPE_STATS_NUM_ENTRIES) + return; + + if (type == PPE_PKT_TYPE_BRIDGE) { + data = &hwe->bridge.data; + ib2 = &hwe->bridge.ib2; + meter = &hwe->bridge.l2.meter; + } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + data = &hwe->ipv6.data; + ib2 = &hwe->ipv6.ib2; + meter = &hwe->ipv6.meter; + } else { + data = &hwe->ipv4.data; + ib2 = &hwe->ipv4.ib2; + meter = &hwe->ipv4.l2.meter; + } + + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index); + + val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data); + *data = (*data & ~AIROHA_FOE_ACTDP) | + FIELD_PREP(AIROHA_FOE_ACTDP, val); + + val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | + AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH); + *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val); + + pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2); + nbq = pse_port == 1 ? 6 : 5; + *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | + AIROHA_FOE_IB2_PSE_QOS); + *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) | + FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq); +} + struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash) { @@ -470,6 +548,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); u32 ts = airoha_ppe_get_timestamp(ppe); struct airoha_eth *eth = ppe->eth; + struct airoha_npu *npu; + int err = 0; memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1)); wmb(); @@ -478,25 +558,28 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts); hwe->ib1 = e->ib1; + rcu_read_lock(); + + npu = rcu_dereference(eth->npu); + if (!npu) { + err = -ENODEV; + goto unlock; + } + + airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash); + if (hash < PPE_SRAM_NUM_ENTRIES) { dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); bool ppe2 = airoha_ppe2_is_enabled(eth) && hash >= PPE1_SRAM_NUM_ENTRIES; - struct airoha_npu *npu; - int err = -ENODEV; - - rcu_read_lock(); - npu = rcu_dereference(eth->npu); - if (npu) - err = npu->ops.ppe_foe_commit_entry(npu, addr, - sizeof(*hwe), hash, - ppe2); - rcu_read_unlock(); - return err; + err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), + hash, ppe2); } +unlock: + rcu_read_unlock(); - return 0; + return err; } static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe, @@ -582,6 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe, l2->common.etype = ETH_P_IPV6; hwe.bridge.ib2 = e->data.bridge.ib2; + hwe.bridge.data = e->data.bridge.data; airoha_ppe_foe_commit_entry(ppe, &hwe, hash); return 0; @@ -681,6 +765,98 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, return 0; } +static 
int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1) +{ + u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); + u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe); + int idle; + + if (state == AIROHA_FOE_STATE_BIND) { + ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1); + ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP; + } else { + ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1); + now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now); + ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP; + } + idle = now - ts; + + return idle < 0 ? idle + ts_mask + 1 : idle; +} + +static void +airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); + struct airoha_flow_table_entry *iter; + struct hlist_node *n; + + lockdep_assert_held(&ppe_lock); + + hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) { + struct airoha_foe_entry *hwe; + u32 ib1, state; + int idle; + + hwe = airoha_ppe_foe_get_entry(ppe, iter->hash); + ib1 = READ_ONCE(hwe->ib1); + + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); + if (state != AIROHA_FOE_STATE_BIND) { + iter->hash = 0xffff; + airoha_ppe_foe_remove_flow(ppe, iter); + continue; + } + + idle = airoha_ppe_get_entry_idle_time(ppe, ib1); + if (idle >= min_idle) + continue; + + min_idle = idle; + e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP; + e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP; + } +} + +static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + struct airoha_foe_entry *hwe_p, hwe = {}; + + spin_lock_bh(&ppe_lock); + + if (e->type == FLOW_TYPE_L2) { + airoha_ppe_foe_flow_l2_entry_update(ppe, e); + goto unlock; + } + + if (e->hash == 0xffff) + goto unlock; + + hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash); + if (!hwe_p) + goto unlock; + + memcpy(&hwe, hwe_p, sizeof(*hwe_p)); + if (!airoha_ppe_foe_compare_entry(e, &hwe)) { + e->hash = 0xffff; + goto unlock; + } + + e->data.ib1 = hwe.ib1; +unlock: + spin_unlock_bh(&ppe_lock); +} + +static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + airoha_ppe_foe_flow_entry_update(ppe, e); + + return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); +} + static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port, struct flow_cls_offload *f) { @@ -896,6 +1072,60 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port, return 0; } +void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, + struct airoha_foe_stats64 *stats) +{ + u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); + struct airoha_eth *eth = ppe->eth; + struct airoha_npu *npu; + + if (index >= PPE_STATS_NUM_ENTRIES) + return; + + rcu_read_lock(); + + npu = rcu_dereference(eth->npu); + if (npu) { + u64 packets = ppe->foe_stats[index].packets; + u64 bytes = ppe->foe_stats[index].bytes; + struct airoha_foe_stats npu_stats; + + memcpy_fromio(&npu_stats, &npu->stats[index], + sizeof(*npu->stats)); + stats->packets = packets << 32 | npu_stats.packets; + stats->bytes = bytes << 32 | npu_stats.bytes; + } + + rcu_read_unlock(); +} + +static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port, + struct flow_cls_offload *f) +{ + struct airoha_eth *eth = port->qdma->eth; + struct airoha_flow_table_entry *e; + u32 idle; + + e = rhashtable_lookup(ð->flow_table, &f->cookie, + airoha_flow_table_params); + if (!e) + return -ENOENT; + + idle = airoha_ppe_entry_idle_time(eth->ppe, e); 
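[Editor's note, not part of the diff] airoha_ppe_get_entry_idle_time() above subtracts two timestamps taken from a truncated hardware field, so a negative difference means the counter wrapped and the field width must be added back. A tiny worked illustration with an arbitrary 16-bit field:

/* Illustrative only: wraparound-safe age of a 16-bit hardware timestamp.
 * Example: now = 5, ts = 0xfffb -> 5 - 65531 = -65526 -> -65526 + 65536 = 10.
 */
static int example_idle_ticks(u16 now, u16 ts)
{
	int idle = now - ts;

	return idle < 0 ? idle + U16_MAX + 1 : idle;
}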
+ f->stats.lastused = jiffies - idle * HZ; + + if (e->hash != 0xffff) { + struct airoha_foe_stats64 stats = {}; + + airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats); + f->stats.pkts += (stats.packets - e->stats.packets); + f->stats.bytes += (stats.bytes - e->stats.bytes); + e->stats = stats; + } + + return 0; +} + static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, struct flow_cls_offload *f) { @@ -904,6 +1134,8 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, return airoha_ppe_flow_offload_replace(port, f); case FLOW_CLS_DESTROY: return airoha_ppe_flow_offload_destroy(port, f); + case FLOW_CLS_STATS: + return airoha_ppe_flow_offload_stats(port, f); default: break; } @@ -929,11 +1161,12 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) { - struct airoha_npu *npu = airoha_npu_get(eth->dev); + struct airoha_npu *npu = airoha_npu_get(eth->dev, + ð->ppe->foe_stats_dma); if (IS_ERR(npu)) { request_module("airoha-npu"); - npu = airoha_npu_get(eth->dev); + npu = airoha_npu_get(eth->dev, ð->ppe->foe_stats_dma); } return npu; @@ -956,6 +1189,8 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) if (err) goto error_npu_put; + airoha_ppe_foe_flow_stats_reset(eth->ppe, npu); + rcu_assign_pointer(eth->npu, npu); synchronize_rcu(); @@ -1027,6 +1262,15 @@ int airoha_ppe_init(struct airoha_eth *eth) if (!ppe->foe_flow) return -ENOMEM; + foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); + if (foe_size) { + ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, + &ppe->foe_stats_dma, + GFP_KERNEL); + if (!ppe->foe_stats) + return -ENOMEM; + } + err = rhashtable_init(ð->flow_table, &airoha_flow_table_params); if (err) return err; diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c index 3cdc6fd53fc7..05a756233f6a 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c +++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c @@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, u16 *src_port = NULL, *dest_port = NULL; struct airoha_foe_mac_info_common *l2; unsigned char h_source[ETH_ALEN] = {}; + struct airoha_foe_stats64 stats = {}; unsigned char h_dest[ETH_ALEN]; struct airoha_foe_entry *hwe; u32 type, state, ib2, data; @@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, cpu_to_be16(hwe->ipv4.l2.src_mac_lo); } + airoha_ppe_foe_entry_get_stats(ppe, i, &stats); + *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi); *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo); *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi); seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x" - " vlan=%d,%d ib1=%08x ib2=%08x\n", + " vlan=%d,%d ib1=%08x ib2=%08x" + " packets=%llu bytes=%llu\n", h_source, h_dest, l2->etype, data, - l2->vlan1, l2->vlan2, hwe->ib1, ib2); + l2->vlan1, l2->vlan2, hwe->ib1, ib2, + stats.packets, stats.bytes); } return 0; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index a461ec612e95..3e9c57196a39 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) struct phy_device *phy_dev; int err; - phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phy_dev = fixed_phy_register(&fphy_status, NULL); if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed 
to register fixed PHY device\n"); return PTR_ERR(phy_dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 238db9a1aebf..84c4812414fd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -20,6 +20,7 @@ #include <asm/byteorder.h> #include <linux/bitmap.h> #include <linux/auxiliary_bus.h> +#include <net/netdev_lock.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -304,14 +305,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_stop) return; if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) reset = true; ops->ulp_irq_stop(ulp->handle, reset); - netdev_unlock(bp->dev); } } @@ -330,8 +329,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) if (!ulp->msix_requested) return; - netdev_lock(bp->dev); - ops = rcu_dereference(ulp->ulp_ops); + ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev); if (!ops || !ops->ulp_irq_restart) return; @@ -343,7 +341,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) bnxt_fill_msix_vecs(bp, ent); } ops->ulp_irq_restart(ulp->handle, ent); - netdev_unlock(bp->dev); kfree(ent); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index e675611777b5..4a6d8cb9f970 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -425,9 +425,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) if (prog) { bnxt_set_rx_skb_mode(bp, true); - xdp_features_set_redirect_target(dev, true); + xdp_features_set_redirect_target_locked(dev, true); } else { - xdp_features_clear_redirect_target(dev); + xdp_features_clear_redirect_target_locked(dev); bnxt_set_rx_skb_mode(bp, false); } bp->tx_nr_rings_xdp = tx_xdp; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 73d78dcb774d..fa0077bc67b7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -969,12 +969,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev, /* standard ethtool support functions. 
*/ enum bcmgenet_stat_type { - BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_RTNL = -1, BCMGENET_STAT_MIB_RX, BCMGENET_STAT_MIB_TX, BCMGENET_STAT_RUNT, BCMGENET_STAT_MISC, BCMGENET_STAT_SOFT, + BCMGENET_STAT_SOFT64, }; struct bcmgenet_stats { @@ -984,13 +985,15 @@ struct bcmgenet_stats { enum bcmgenet_stat_type type; /* reg offset from UMAC base for misc counters */ u16 reg_offset; + /* sync for u64 stats counters */ + int syncp_offset; }; -#define STAT_NETDEV(m) { \ +#define STAT_RTNL(m) { \ .stat_string = __stringify(m), \ - .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ - .stat_offset = offsetof(struct net_device_stats, m), \ - .type = BCMGENET_STAT_NETDEV, \ + .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m), \ + .type = BCMGENET_STAT_RTNL, \ } #define STAT_GENET_MIB(str, m, _type) { \ @@ -1000,6 +1003,14 @@ struct bcmgenet_stats { .type = _type, \ } +#define STAT_GENET_SOFT_MIB64(str, s, m) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \ + .stat_offset = offsetof(struct bcmgenet_priv, s.m), \ + .type = BCMGENET_STAT_SOFT64, \ + .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \ +} + #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) @@ -1014,18 +1025,38 @@ struct bcmgenet_stats { } #define STAT_GENET_Q(num) \ - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ - tx_rings[num].packets), \ - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ - tx_rings[num].bytes), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ - rx_rings[num].bytes), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ - rx_rings[num].packets), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ - rx_rings[num].errors), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ - rx_rings[num].dropped) + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \ + tx_rings[num].stats64, packets), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \ + tx_rings[num].stats64, bytes), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \ + tx_rings[num].stats64, errors), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \ + tx_rings[num].stats64, dropped), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \ + rx_rings[num].stats64, bytes), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \ + rx_rings[num].stats64, packets), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \ + rx_rings[num].stats64, errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \ + rx_rings[num].stats64, dropped), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \ + rx_rings[num].stats64, multicast), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \ + rx_rings[num].stats64, missed), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \ + rx_rings[num].stats64, length_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \ + rx_rings[num].stats64, over_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \ + rx_rings[num].stats64, crc_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \ + rx_rings[num].stats64, frame_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \ + rx_rings[num].stats64, 
fragmented_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \ + rx_rings[num].stats64, broadcast) /* There is a 0xC gap between the end of RX and beginning of TX stats and then * between the end of TX stats and the beginning of the RX RUNT @@ -1037,15 +1068,20 @@ struct bcmgenet_stats { */ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { /* general stats */ - STAT_NETDEV(rx_packets), - STAT_NETDEV(tx_packets), - STAT_NETDEV(rx_bytes), - STAT_NETDEV(tx_bytes), - STAT_NETDEV(rx_errors), - STAT_NETDEV(tx_errors), - STAT_NETDEV(rx_dropped), - STAT_NETDEV(tx_dropped), - STAT_NETDEV(multicast), + STAT_RTNL(rx_packets), + STAT_RTNL(tx_packets), + STAT_RTNL(rx_bytes), + STAT_RTNL(tx_bytes), + STAT_RTNL(rx_errors), + STAT_RTNL(tx_errors), + STAT_RTNL(rx_dropped), + STAT_RTNL(tx_dropped), + STAT_RTNL(multicast), + STAT_RTNL(rx_missed_errors), + STAT_RTNL(rx_length_errors), + STAT_RTNL(rx_over_errors), + STAT_RTNL(rx_crc_errors), + STAT_RTNL(rx_frame_errors), /* UniMAC RSV counters */ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), @@ -1133,6 +1169,20 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) +#define BCMGENET_STATS64_ADD(stats, m, v) \ + do { \ + u64_stats_update_begin(&stats->syncp); \ + u64_stats_add(&stats->m, v); \ + u64_stats_update_end(&stats->syncp); \ + } while (0) + +#define BCMGENET_STATS64_INC(stats, m) \ + do { \ + u64_stats_update_begin(&stats->syncp); \ + u64_stats_inc(&stats->m); \ + u64_stats_update_end(&stats->syncp); \ + } while (0) + static void bcmgenet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1216,8 +1266,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) s = &bcmgenet_gstrings_stats[i]; switch (s->type) { - case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_RTNL: case BCMGENET_STAT_SOFT: + case BCMGENET_STAT_SOFT64: continue; case BCMGENET_STAT_RUNT: offset += BCMGENET_STAT_OFFSET; @@ -1255,28 +1306,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, u64 *data) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct rtnl_link_stats64 stats64; + struct u64_stats_sync *syncp; + unsigned int start; int i; if (netif_running(dev)) bcmgenet_update_mib_counters(priv); - dev->netdev_ops->ndo_get_stats(dev); + dev_get_stats(dev, &stats64); for (i = 0; i < BCMGENET_STATS_LEN; i++) { const struct bcmgenet_stats *s; char *p; s = &bcmgenet_gstrings_stats[i]; - if (s->type == BCMGENET_STAT_NETDEV) - p = (char *)&dev->stats; - else - p = (char *)priv; - p += s->stat_offset; - if (sizeof(unsigned long) != sizeof(u32) && - s->stat_sizeof == sizeof(unsigned long)) - data[i] = *(unsigned long *)p; - else - data[i] = *(u32 *)p; + p = (char *)priv; + + if (s->type == BCMGENET_STAT_SOFT64) { + syncp = (struct u64_stats_sync *)(p + s->syncp_offset); + do { + start = u64_stats_fetch_begin(syncp); + data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset)); + } while (u64_stats_fetch_retry(syncp, start)); + } else { + if (s->type == BCMGENET_STAT_RTNL) + p = (char *)&stats64; + + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } } } @@ -1856,6 +1919,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) 
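[Editor's note, not part of the diff] The BCMGENET_STATS64_ADD/INC macros above follow the standard u64_stats_sync discipline: writers bracket updates with u64_stats_update_begin()/end(), and readers loop with u64_stats_fetch_begin()/u64_stats_fetch_retry() so they never observe a torn 64-bit value on 32-bit hosts. A generic, self-contained sketch of both sides (names are illustrative):

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

/* Writer side: called from the datapath for each completed packet. */
static void example_stats_add(struct example_ring_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retries if a writer was in progress on 32-bit systems. */
static void example_stats_read(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}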
{ + struct bcmgenet_tx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int txbds_processed = 0; unsigned int bytes_compl = 0; @@ -1896,8 +1960,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, ring->free_bds += txbds_processed; ring->c_index = c_index; - ring->packets += pkts_compl; - ring->bytes += bytes_compl; + u64_stats_update_begin(&stats->syncp); + u64_stats_add(&stats->packets, pkts_compl); + u64_stats_add(&stats->bytes, bytes_compl); + u64_stats_update_end(&stats->syncp); netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index), pkts_compl, bytes_compl); @@ -1983,8 +2049,10 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) * the transmit checksum offsets in the descriptors */ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + struct bcmgenet_tx_ring *ring) { + struct bcmgenet_tx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = netdev_priv(dev); struct status_64 *status = NULL; struct sk_buff *new_skb; @@ -2001,7 +2069,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, if (!new_skb) { dev_kfree_skb_any(skb); priv->mib.tx_realloc_tsb_failed++; - dev->stats.tx_dropped++; + BCMGENET_STATS64_INC(stats, dropped); return NULL; } dev_consume_skb_any(skb); @@ -2089,7 +2157,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) GENET_CB(skb)->bytes_sent = skb->len; /* add the Transmit Status Block */ - skb = bcmgenet_add_tsb(dev, skb); + skb = bcmgenet_add_tsb(dev, skb, ring); if (!skb) { ret = NETDEV_TX_OK; goto out; @@ -2231,6 +2299,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int budget) { + struct bcmgenet_rx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = ring->priv; struct net_device *dev = priv->dev; struct enet_cb *cb; @@ -2253,7 +2322,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, DMA_P_INDEX_DISCARD_CNT_MASK; if (discards > ring->old_discards) { discards = discards - ring->old_discards; - ring->errors += discards; + BCMGENET_STATS64_ADD(stats, missed, discards); ring->old_discards += discards; /* Clear HW register when we reach 75% of maximum 0xFFFF */ @@ -2279,7 +2348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, skb = bcmgenet_rx_refill(priv, cb); if (unlikely(!skb)) { - ring->dropped++; + BCMGENET_STATS64_INC(stats, dropped); goto next; } @@ -2306,8 +2375,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(len > RX_BUF_LENGTH)) { netif_err(priv, rx_status, dev, "oversized packet\n"); - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; + BCMGENET_STATS64_INC(stats, length_errors); dev_kfree_skb_any(skb); goto next; } @@ -2315,7 +2383,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); - ring->errors++; + BCMGENET_STATS64_INC(stats, fragmented_errors); dev_kfree_skb_any(skb); goto next; } @@ -2328,15 +2396,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, DMA_RX_RXER))) { netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", (unsigned int)dma_flag); + u64_stats_update_begin(&stats->syncp); if (dma_flag & DMA_RX_CRC_ERROR) - dev->stats.rx_crc_errors++; + u64_stats_inc(&stats->crc_errors); if (dma_flag & 
DMA_RX_OV) - dev->stats.rx_over_errors++; + u64_stats_inc(&stats->over_errors); if (dma_flag & DMA_RX_NO) - dev->stats.rx_frame_errors++; + u64_stats_inc(&stats->frame_errors); if (dma_flag & DMA_RX_LG) - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; + u64_stats_inc(&stats->length_errors); + if ((dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER)) == DMA_RX_RXER) + u64_stats_inc(&stats->errors); + u64_stats_update_end(&stats->syncp); dev_kfree_skb_any(skb); goto next; } /* error packet */ @@ -2356,10 +2431,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, /*Finish setting up the received SKB and send it to the kernel*/ skb->protocol = eth_type_trans(skb, priv->dev); - ring->packets++; - ring->bytes += len; + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->packets); + u64_stats_add(&stats->bytes, len); if (dma_flag & DMA_RX_MULT) - dev->stats.multicast++; + u64_stats_inc(&stats->multicast); + else if (dma_flag & DMA_RX_BRDCAST) + u64_stats_inc(&stats->broadcast); + u64_stats_update_end(&stats->syncp); /* Notify kernel */ napi_gro_receive(&ring->napi, skb); @@ -3420,7 +3500,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) netif_trans_update(dev); - dev->stats.tx_errors++; + BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors); netif_tx_wake_all_queues(dev); } @@ -3509,39 +3589,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) return 0; } -static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +static void bcmgenet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned long tx_bytes = 0, tx_packets = 0; - unsigned long rx_bytes = 0, rx_packets = 0; - unsigned long rx_errors = 0, rx_dropped = 0; - struct bcmgenet_tx_ring *tx_ring; - struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_stats64 *tx_stats; + struct bcmgenet_rx_stats64 *rx_stats; + u64 rx_length_errors, rx_over_errors; + u64 rx_missed, rx_fragmented_errors; + u64 rx_crc_errors, rx_frame_errors; + u64 tx_errors, tx_dropped; + u64 rx_errors, rx_dropped; + u64 tx_bytes, tx_packets; + u64 rx_bytes, rx_packets; + unsigned int start; unsigned int q; + u64 multicast; for (q = 0; q <= priv->hw_params->tx_queues; q++) { - tx_ring = &priv->tx_rings[q]; - tx_bytes += tx_ring->bytes; - tx_packets += tx_ring->packets; + tx_stats = &priv->tx_rings[q].stats64; + do { + start = u64_stats_fetch_begin(&tx_stats->syncp); + tx_bytes = u64_stats_read(&tx_stats->bytes); + tx_packets = u64_stats_read(&tx_stats->packets); + tx_errors = u64_stats_read(&tx_stats->errors); + tx_dropped = u64_stats_read(&tx_stats->dropped); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); + + stats->tx_bytes += tx_bytes; + stats->tx_packets += tx_packets; + stats->tx_errors += tx_errors; + stats->tx_dropped += tx_dropped; } for (q = 0; q <= priv->hw_params->rx_queues; q++) { - rx_ring = &priv->rx_rings[q]; - - rx_bytes += rx_ring->bytes; - rx_packets += rx_ring->packets; - rx_errors += rx_ring->errors; - rx_dropped += rx_ring->dropped; + rx_stats = &priv->rx_rings[q].stats64; + do { + start = u64_stats_fetch_begin(&rx_stats->syncp); + rx_bytes = u64_stats_read(&rx_stats->bytes); + rx_packets = u64_stats_read(&rx_stats->packets); + rx_errors = u64_stats_read(&rx_stats->errors); + rx_dropped = u64_stats_read(&rx_stats->dropped); + rx_missed = u64_stats_read(&rx_stats->missed); + rx_length_errors = 
u64_stats_read(&rx_stats->length_errors); + rx_over_errors = u64_stats_read(&rx_stats->over_errors); + rx_crc_errors = u64_stats_read(&rx_stats->crc_errors); + rx_frame_errors = u64_stats_read(&rx_stats->frame_errors); + rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors); + multicast = u64_stats_read(&rx_stats->multicast); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); + + rx_errors += rx_length_errors; + rx_errors += rx_crc_errors; + rx_errors += rx_frame_errors; + rx_errors += rx_fragmented_errors; + + stats->rx_bytes += rx_bytes; + stats->rx_packets += rx_packets; + stats->rx_errors += rx_errors; + stats->rx_dropped += rx_dropped; + stats->rx_missed_errors += rx_missed; + stats->rx_length_errors += rx_length_errors; + stats->rx_over_errors += rx_over_errors; + stats->rx_crc_errors += rx_crc_errors; + stats->rx_frame_errors += rx_frame_errors; + stats->multicast += multicast; } - - dev->stats.tx_bytes = tx_bytes; - dev->stats.tx_packets = tx_packets; - dev->stats.rx_bytes = rx_bytes; - dev->stats.rx_packets = rx_packets; - dev->stats.rx_errors = rx_errors; - dev->stats.rx_missed_errors = rx_errors; - dev->stats.rx_dropped = rx_dropped; - return &dev->stats; } static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) @@ -3569,7 +3682,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, - .ndo_get_stats = bcmgenet_get_stats, + .ndo_get_stats64 = bcmgenet_get_stats64, .ndo_change_carrier = bcmgenet_change_carrier, }; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 10c631bbe964..5ec3979779ec 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -155,6 +155,30 @@ struct bcmgenet_mib_counters { u32 tx_realloc_tsb_failed; }; +struct bcmgenet_tx_stats64 { + struct u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t errors; + u64_stats_t dropped; +}; + +struct bcmgenet_rx_stats64 { + struct u64_stats_sync syncp; + u64_stats_t bytes; + u64_stats_t packets; + u64_stats_t errors; + u64_stats_t dropped; + u64_stats_t multicast; + u64_stats_t broadcast; + u64_stats_t missed; + u64_stats_t length_errors; + u64_stats_t over_errors; + u64_stats_t crc_errors; + u64_stats_t frame_errors; + u64_stats_t fragmented_errors; +}; + #define UMAC_MIB_START 0x400 #define UMAC_MDIO_CMD 0x614 @@ -515,8 +539,7 @@ struct bcmgenet_skb_cb { struct bcmgenet_tx_ring { spinlock_t lock; /* ring lock */ struct napi_struct napi; /* NAPI per tx queue */ - unsigned long packets; - unsigned long bytes; + struct bcmgenet_tx_stats64 stats64; unsigned int index; /* ring index */ struct enet_cb *cbs; /* tx ring buffer control block*/ unsigned int size; /* size of each tx ring */ @@ -540,10 +563,7 @@ struct bcmgenet_net_dim { struct bcmgenet_rx_ring { struct napi_struct napi; /* Rx NAPI struct */ - unsigned long bytes; - unsigned long packets; - unsigned long errors; - unsigned long dropped; + struct bcmgenet_rx_stats64 stats64; unsigned int index; /* Rx ring index */ struct enet_cb *cbs; /* Rx ring buffer control block */ unsigned int size; /* Rx ring size */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 71c619d2bea5..b6437ba7a2eb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ 
b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -625,7 +625,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) .asym_pause = 0, }; - phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phydev = fixed_phy_register(&fphy_status, NULL); if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return PTR_ERR(phydev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 551c279dc14b..51395c96b2e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6480,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = { #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) -static int cxgb4_xfrm_add_state(struct xfrm_state *x, +static int cxgb4_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); int ret; if (!mutex_trylock(&uld_mutex)) { @@ -6494,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x, if (ret) goto out_unlock; - ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x, + extack); out_unlock: mutex_unlock(&uld_mutex); @@ -6502,9 +6504,9 @@ out_unlock: return ret; } -static void cxgb4_xfrm_del_state(struct xfrm_state *x) +static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); if (!mutex_trylock(&uld_mutex)) { dev_dbg(adap->pdev_dev, @@ -6514,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x) if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) goto out_unlock; - adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x); out_unlock: mutex_unlock(&uld_mutex); } -static void cxgb4_xfrm_free_state(struct xfrm_state *x) +static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); if (!mutex_trylock(&uld_mutex)) { dev_dbg(adap->pdev_dev, @@ -6532,7 +6534,7 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x) if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) goto out_unlock; - adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x); out_unlock: mutex_unlock(&uld_mutex); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index baba96883f48..ecd9a0bd5e18 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -75,9 +75,12 @@ static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state); static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop); static void ch_ipsec_advance_esn_state(struct xfrm_state *x); -static void ch_ipsec_xfrm_free_state(struct xfrm_state *x); -static void ch_ipsec_xfrm_del_state(struct xfrm_state *x); -static int ch_ipsec_xfrm_add_state(struct xfrm_state *x, +static void ch_ipsec_xfrm_free_state(struct net_device *dev, + struct xfrm_state *x); +static void ch_ipsec_xfrm_del_state(struct 
net_device *dev, + struct xfrm_state *x); +static int ch_ipsec_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack); static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = { @@ -223,7 +226,8 @@ out: * returns 0 on success, negative error if failed to send message to FPGA * positive error if FPGA returned a bad response */ -static int ch_ipsec_xfrm_add_state(struct xfrm_state *x, +static int ch_ipsec_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { struct ipsec_sa_entry *sa_entry; @@ -302,14 +306,16 @@ out: return res; } -static void ch_ipsec_xfrm_del_state(struct xfrm_state *x) +static void ch_ipsec_xfrm_del_state(struct net_device *dev, + struct xfrm_state *x) { /* do nothing */ if (!x->xso.offload_handle) return; } -static void ch_ipsec_xfrm_free_state(struct xfrm_state *x) +static void ch_ipsec_xfrm_free_state(struct net_device *dev, + struct xfrm_state *x) { struct ipsec_sa_entry *sa_entry; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 9c12e967e9f1..301b3f3114af 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -26,6 +26,7 @@ #define ENIC_WQ_MAX 256 #define ENIC_RQ_MAX 256 +#define ENIC_RQ_MIN_DEFAULT 8 #define ENIC_WQ_NAPI_BUDGET 256 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c753c35b26eb..6ef8a0d90bce 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2296,7 +2296,8 @@ static int enic_adjust_resources(struct enic *enic) * used based on which resource is the most constrained */ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX); - rq_default = netif_get_num_default_rss_queues(); + rq_default = max(netif_get_num_default_rss_queues(), + ENIC_RQ_MIN_DEFAULT); rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default); max_queues = min(enic->cq_avail, enic->intr_avail - ENIC_MSIX_RESERVED_INTR); diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 56aff2f0bdbf..ba679025e866 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -329,18 +329,18 @@ enum _pcs_anlpar { }; typedef struct t_SROM { - u16 config_param; /* 0x00 */ - u16 asic_ctrl; /* 0x02 */ - u16 sub_vendor_id; /* 0x04 */ - u16 sub_system_id; /* 0x06 */ - u16 pci_base_1; /* 0x08 (IP1000A only) */ - u16 pci_base_2; /* 0x0a (IP1000A only) */ + __le16 config_param; /* 0x00 */ + __le16 asic_ctrl; /* 0x02 */ + __le16 sub_vendor_id; /* 0x04 */ + __le16 sub_system_id; /* 0x06 */ + __le16 pci_base_1; /* 0x08 (IP1000A only) */ + __le16 pci_base_2; /* 0x0a (IP1000A only) */ __le16 led_mode; /* 0x0c (IP1000A only) */ - u16 reserved1[9]; /* 0x0e-0x1f */ + __le16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ u8 sib[204]; /* 0x30-0xfb */ - u32 crc; /* 0xfc-0xff */ + __le32 crc; /* 0xfc-0xff */ } SROM_t, *PSROM_t; /* Ioctl custom data */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index adb441b36581..d730af4a50c7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) /* version 1 of the cmd is not supported only by BE2 */ if (BE2_chip(adapter)) hdr->version = 0; - if (BE3_chip(adapter) || lancer_chip(adapter)) + 
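[Editor's note, not part of the diff] The dl2k SROM fields a few hunks above become __le16/__le32 so sparse can flag missing byte-order conversions; CPU code then reads them through le16_to_cpu()/le32_to_cpu(). A one-line illustration (the accessor name is made up):

/* Illustrative only: endian-safe read of a little-endian SROM field. */
static u16 example_srom_sub_vendor(const SROM_t *srom)
{
	return le16_to_cpu(srom->sub_vendor_id);
}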
else if (BE3_chip(adapter) || lancer_chip(adapter)) hdr->version = 1; else hdr->version = 2; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 17ec35e75a65..a98d5af3f9e3 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1906,7 +1906,7 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_phy_connect; } - phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np); + phydev = fixed_phy_register(&ncsi_phy_status, np); if (IS_ERR(phydev)) { dev_err(&pdev->dev, "failed to register fixed PHY device\n"); err = PTR_ERR(phydev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index c16378eb50bc..b3dc1afeefd1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -971,8 +971,9 @@ static int enetc4_pf_netdev_create(struct enetc_si *si) return 0; err_reg_netdev: - enetc4_link_deinit(priv); + destroy_workqueue(si->workqueue); err_wq_init: + enetc4_link_deinit(priv); err_link_init: enetc_free_msix(priv); err_alloc_msix: diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index a0bcfb5a713d..ff3295b60a69 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type) return -EBUSY; } + netif_device_detach(priv->netdev); + priv->reset_type = type; set_bit(HBG_NIC_STATE_RESETTING, &priv->state); clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); @@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type) return ret; } + netif_device_attach(priv->netdev); + dev_info(&priv->pdev->dev, "reset done\n"); return ret; } @@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv) if (running) dev_close(priv->netdev); - hbg_reset(priv); - - /* in hbg_pci_err_detected(), we will detach first, - * so we need to attach before open - */ - if (!netif_device_present(priv->netdev)) - netif_device_attach(priv->netdev); + if (hbg_reset(priv)) + goto err_unlock; if (running) dev_open(priv->netdev, NULL); + +err_unlock: rtnl_unlock(); } @@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); hbg_err_reset(priv); - netif_device_attach(netdev); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index 8f1107b85fbb..55520053270a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv, const struct hbg_ethtool_stats *stats; u32 i; + if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return; + for (i = 0; i < info_len; i++) { stats = &info[i]; if (!stats->reg) diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig index c05fce15eb51..7d0feb1da158 100644 --- a/drivers/net/ethernet/huawei/Kconfig +++ b/drivers/net/ethernet/huawei/Kconfig @@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI if NET_VENDOR_HUAWEI source "drivers/net/ethernet/huawei/hinic/Kconfig" +source "drivers/net/ethernet/huawei/hinic3/Kconfig" endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile 
b/drivers/net/ethernet/huawei/Makefile index 2549ad5afe6d..59865b882879 100644 --- a/drivers/net/ethernet/huawei/Makefile +++ b/drivers/net/ethernet/huawei/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_HINIC) += hinic/ +obj-$(CONFIG_HINIC3) += hinic3/ diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig new file mode 100644 index 000000000000..ce4331d1387b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Huawei driver configuration +# + +config HINIC3 + tristate "Huawei 3rd generation network adapters (HINIC3) support" + # Fields of HW and management structures are little endian and are + # currently not converted + depends on !CPU_BIG_ENDIAN + depends on X86 || ARM64 || COMPILE_TEST + depends on PCI_MSI && 64BIT + select AUXILIARY_BUS + select PAGE_POOL + help + This driver supports HiNIC 3rd gen Network Adapter (HINIC3). + The driver is supported on X86_64 and ARM64 little endian. + + To compile this driver as a module, choose M here. + The module will be called hinic3. diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile new file mode 100644 index 000000000000..509dfbfb0e96 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +obj-$(CONFIG_HINIC3) += hinic3.o + +hinic3-objs := hinic3_common.o \ + hinic3_hw_cfg.o \ + hinic3_hw_comm.o \ + hinic3_hwdev.o \ + hinic3_hwif.o \ + hinic3_irq.o \ + hinic3_lld.o \ + hinic3_main.o \ + hinic3_mbox.o \ + hinic3_netdev_ops.o \ + hinic3_nic_cfg.o \ + hinic3_nic_io.o \ + hinic3_queue_common.o \ + hinic3_rx.o \ + hinic3_tx.o \ + hinic3_wq.o diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c new file mode 100644 index 000000000000..0aa42068728c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include <linux/delay.h> +#include <linux/dma-mapping.h> + +#include "hinic3_common.h" + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align) +{ + dma_addr_t paddr, align_paddr; + void *vaddr, *align_vaddr; + u32 real_size = size; + + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = vaddr + (align_paddr - paddr); + +out: + mem_align->real_size = real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align) +{ + dma_free_coherent(dev, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h new file mode 100644 index 000000000000..bb795dace04c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_COMMON_H_ +#define _HINIC3_COMMON_H_ + +#include <linux/device.h> + +#define HINIC3_MIN_PAGE_SIZE 0x1000 + +struct hinic3_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align); +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c new file mode 100644 index 000000000000..87d9450c30ca --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/device.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.supp_svcs_bitmap & + BIT(HINIC3_SERVICE_T_NIC); +} + +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs; +} + +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.port_id; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h new file mode 100644 index 000000000000..e017b1ae9f05 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
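The aligned allocation helper above (hinic3_dma_zalloc_coherent_align) first tries a plain dma_alloc_coherent() and only re-allocates with size + align padding when the returned address happens to be misaligned, keeping both the original and the aligned addresses so the buffer can still be freed later. A minimal usage sketch, assuming a hypothetical caller that wants a 4 KiB area aligned to 256 bytes (the two helpers and the struct are the ones added above; the caller name and the size/alignment values are illustrative only):

/* Hypothetical caller: allocate and release a 256-byte-aligned DMA area. */
static int example_alloc_aligned_area(struct hinic3_hwdev *hwdev,
                                      struct hinic3_dma_addr_align *area)
{
        int err;

        err = hinic3_dma_zalloc_coherent_align(hwdev->dev, 0x1000, 0x100,
                                               GFP_KERNEL, area);
        if (err)
                return err;

        /* Hardware would be given area->align_paddr; the CPU writes through
         * area->align_vaddr. Freeing always goes back to the original
         * address pair recorded by the helper.
         */
        return 0;
}

static void example_free_aligned_area(struct hinic3_hwdev *hwdev,
                                      struct hinic3_dma_addr_align *area)
{
        hinic3_dma_free_coherent_align(hwdev->dev, area);
}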
*/ + +#ifndef _HINIC3_HW_CFG_H_ +#define _HINIC3_HW_CFG_H_ + +#include <linux/mutex.h> +#include <linux/pci.h> + +struct hinic3_hwdev; + +struct hinic3_irq { + u32 irq_id; + u16 msix_entry_idx; + bool allocated; +}; + +struct hinic3_irq_info { + struct hinic3_irq *irq; + u16 num_irq; + /* device max irq number */ + u16 num_irq_hw; + /* protect irq alloc and free */ + struct mutex irq_mutex; +}; + +struct hinic3_nic_service_cap { + u16 max_sqs; +}; + +/* Device capabilities */ +struct hinic3_dev_cap { + /* Bitmasks of services supported by device */ + u16 supp_svcs_bitmap; + /* Physical port */ + u8 port_id; + struct hinic3_nic_service_cap nic_svc_cap; +}; + +struct hinic3_cfg_mgmt_info { + struct hinic3_irq_info irq_info; + struct hinic3_dev_cap cap; +}; + +int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, + struct msix_entry *alloc_arr, u16 *act_num); +void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id); + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev); +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev); +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c new file mode 100644 index 000000000000..434696ce7dc2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/delay.h> + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag) +{ + struct comm_cmd_func_reset func_reset = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + + mgmt_msg_params_init_default(&msg_params, &func_reset, + sizeof(func_reset)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_FUNC_RESET, &msg_params); + if (err || func_reset.head.status) { + dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n", + reset_flag, err, func_reset.head.status); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h new file mode 100644 index 000000000000..c33a1c77da9c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HW_COMM_H_ +#define _HINIC3_HW_COMM_H_ + +#include "hinic3_hw_intf.h" + +struct hinic3_hwdev; + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h new file mode 100644 index 000000000000..22c84093efa2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HW_INTF_H_ +#define _HINIC3_HW_INTF_H_ + +#include <linux/bits.h> +#include <linux/types.h> + +#define MGMT_MSG_CMD_OP_SET 1 +#define MGMT_MSG_CMD_OP_GET 0 + +#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4 +#define MGMT_STATUS_EXIST 0x6 +#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF + +#define MGMT_MSG_POLLING_TIMEOUT 0 + +struct mgmt_msg_head { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +struct mgmt_msg_params { + const void *buf_in; + u32 in_size; + void *buf_out; + u32 expected_out_size; + u32 timeout_ms; +}; + +/* CMDQ MODULE_TYPE */ +enum mgmt_mod_type { + /* HW communication module */ + MGMT_MOD_COMM = 0, + /* L2NIC module */ + MGMT_MOD_L2NIC = 1, + /* Configuration module */ + MGMT_MOD_CFGM = 7, + MGMT_MOD_HILINK = 14, +}; + +static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params, + void *inout_buf, u32 buf_size) +{ + msg_params->buf_in = inout_buf; + msg_params->buf_out = inout_buf; + msg_params->in_size = buf_size; + msg_params->expected_out_size = buf_size; + msg_params->timeout_ms = 0; +} + +/* COMM Commands between Driver to fw */ +enum comm_cmd { + /* Commands for clearing FLR and resources */ + COMM_CMD_FUNC_RESET = 0, + COMM_CMD_FEATURE_NEGO = 1, + COMM_CMD_FLUSH_DOORBELL = 2, + COMM_CMD_START_FLUSH = 3, + COMM_CMD_GET_GLOBAL_ATTR = 5, + COMM_CMD_SET_FUNC_SVC_USED_STATE = 7, + + /* Driver Configuration Commands */ + COMM_CMD_SET_CMDQ_CTXT = 20, + COMM_CMD_SET_VAT = 21, + COMM_CMD_CFG_PAGESIZE = 22, + COMM_CMD_CFG_MSIX_CTRL_REG = 23, + COMM_CMD_SET_CEQ_CTRL_REG = 24, + COMM_CMD_SET_DMA_ATTR = 25, +}; + +enum comm_func_reset_bits { + COMM_FUNC_RESET_BIT_FLUSH = BIT(0), + COMM_FUNC_RESET_BIT_MQM = BIT(1), + COMM_FUNC_RESET_BIT_SMF = BIT(2), + COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3), + + COMM_FUNC_RESET_BIT_COMM = BIT(10), + /* clear mbox and aeq, The COMM_FUNC_RESET_BIT_COMM bit must be set */ + COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11), + /* clear cmdq and ceq, The COMM_FUNC_RESET_BIT_COMM bit must be set */ + COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12), + COMM_FUNC_RESET_BIT_NIC = BIT(13), +}; + +struct comm_cmd_func_reset { + struct mgmt_msg_head head; + u16 func_id; + u16 rsvd1[3]; + u64 reset_flag; +}; + +#define COMM_MAX_FEATURE_QWORD 4 +struct comm_cmd_feature_nego { + struct mgmt_msg_head head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[COMM_MAX_FEATURE_QWORD]; +}; + +/* Services supported by HW. HW uses these values when delivering events. + * HW supports multiple services that are not yet supported by driver + * (e.g. RoCE). + */ +enum hinic3_service_type { + HINIC3_SERVICE_T_NIC = 0, + /* MAX is only used by SW for array sizes. */ + HINIC3_SERVICE_T_MAX = 1, +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c new file mode 100644 index 000000000000..6e8788a64925 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_mgmt.h" + +int hinic3_init_hwdev(struct pci_dev *pdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. */ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h new file mode 100644 index 000000000000..62e2745e9316 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HWDEV_H_ +#define _HINIC3_HWDEV_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/pci.h> + +#include "hinic3_hw_intf.h" + +struct hinic3_cmdqs; +struct hinic3_hwif; + +enum hinic3_event_service_type { + HINIC3_EVENT_SRV_COMM = 0, + HINIC3_EVENT_SRV_NIC = 1 +}; + +#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type)) + +/* driver-specific data of pci_dev */ +struct hinic3_pcidev { + struct pci_dev *pdev; + struct hinic3_hwdev *hwdev; + /* Auxiliary devices */ + struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *db_base; + u64 db_dwqe_len; + u64 db_base_phy; + + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + unsigned long state; +}; + +struct hinic3_hwdev { + struct hinic3_pcidev *adapter; + struct pci_dev *pdev; + struct device *dev; + int dev_id; + struct hinic3_hwif *hwif; + struct hinic3_cfg_mgmt_info *cfg_mgmt; + struct hinic3_aeqs *aeqs; + struct hinic3_ceqs *ceqs; + struct hinic3_mbox *mbox; + struct hinic3_cmdqs *cmdqs; + struct workqueue_struct *workq; + /* protect channel init and uninit */ + spinlock_t channel_lock; + u64 features[COMM_MAX_FEATURE_QWORD]; + u32 wq_page_size; + u8 max_cmdq; + ulong func_state; +}; + +struct hinic3_event_info { + /* enum hinic3_event_service_type */ + u16 service; + u16 type; + u8 event_data[104]; +}; + +struct hinic3_adev { + struct auxiliary_device adev; + struct hinic3_hwdev *hwdev; + enum hinic3_service_type svc_type; + + void (*event)(struct auxiliary_device *adev, + struct hinic3_event_info *event); +}; + +int hinic3_init_hwdev(struct pci_dev *pdev); +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev); + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c new file mode 100644 index 000000000000..0865453bf0e7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/io.h> + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag) +{ + /* Completed by later submission due to LoC limit. */ +} + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->hwif->attr.func_global_idx; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h new file mode 100644 index 000000000000..513c9680e6b6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HWIF_H_ +#define _HINIC3_HWIF_H_ + +#include <linux/build_bug.h> +#include <linux/spinlock_types.h> + +struct hinic3_hwdev; + +enum hinic3_func_type { + HINIC3_FUNC_TYPE_VF = 1, +}; + +struct hinic3_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* protect doorbell area alloc and free */ + spinlock_t idx_lock; +}; + +struct hinic3_func_attr { + enum hinic3_func_type func_type; + u16 func_global_idx; + u16 global_vf_id_of_pf; + u16 num_irqs; + u16 num_sq; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 ppf_idx; + u8 num_aeqs; + u8 num_ceqs; + u8 msix_flex_en; +}; + +static_assert(sizeof(struct hinic3_func_attr) == 20); + +struct hinic3_hwif { + u8 __iomem *cfg_regs_base; + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + struct hinic3_db_area db_area; + struct hinic3_func_attr attr; +}; + +enum hinic3_msix_state { + HINIC3_MSIX_ENABLE, + HINIC3_MSIX_DISABLE, +}; + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag); + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c new file mode 100644 index 000000000000..8b92eed25edf --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/netdevice.h> + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_dev.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_poll(struct napi_struct *napi, int budget) +{ + struct hinic3_irq_cfg *irq_cfg = + container_of(napi, struct hinic3_irq_cfg, napi); + struct hinic3_nic_dev *nic_dev; + bool busy = false; + int work_done; + + nic_dev = netdev_priv(irq_cfg->netdev); + + busy |= hinic3_tx_poll(irq_cfg->txq, budget); + + if (unlikely(!budget)) + return 0; + + work_done = hinic3_rx_poll(irq_cfg->rxq, budget); + busy |= work_done >= budget; + + if (busy) + return budget; + + if (likely(napi_complete_done(napi, work_done))) + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return work_done; +} + +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi); + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); + napi_enable(&irq_cfg->napi); +} + +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_RX, NULL); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_TX, NULL); + netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id); + netif_napi_del(&irq_cfg->napi); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c new file mode 100644 index 000000000000..4827326e6a59 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
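hinic3_poll() above follows the usual NAPI contract: the TX and RX rings are serviced up to the budget, and only when the poll finishes early is the queue's MSI-X vector re-armed through hinic3_set_msix_state(). The matching interrupt handler is not part of this chunk; a sketch of the shape it presumably takes (the handler name is hypothetical, it would also need linux/interrupt.h, and the helpers and fields it touches are the ones defined in this series):

/* Hypothetical top half: mask the vector and defer the work to hinic3_poll(). */
static irqreturn_t example_qp_isr(int irq, void *data)
{
        struct hinic3_irq_cfg *irq_cfg = data;
        struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);

        hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
                              HINIC3_MSIX_DISABLE);
        napi_schedule(&irq_cfg->napi);

        return IRQ_HANDLED;
}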
+ +#include <linux/delay.h> +#include <linux/iopoll.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_lld.h" +#include "hinic3_mgmt.h" + +#define HINIC3_VF_PCI_CFG_REG_BAR 0 +#define HINIC3_PCI_INTR_REG_BAR 2 +#define HINIC3_PCI_DB_BAR 4 + +#define HINIC3_EVENT_POLL_SLEEP_US 1000 +#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000 + +static struct hinic3_adev_device { + const char *name; +} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = { + [HINIC3_SERVICE_T_NIC] = { + .name = "nic", + }, +}; + +static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + switch (svc_type) { + case HINIC3_SERVICE_T_NIC: + return hinic3_support_nic(hwdev); + default: + break; + } + + return false; +} + +static void hinic3_comm_adev_release(struct device *dev) +{ + struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev, + adev.dev); + + kfree(hadev); +} + +static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_adev *hadev; + const char *svc_name; + int ret; + + hadev = kzalloc(sizeof(*hadev), GFP_KERNEL); + if (!hadev) + return NULL; + + svc_name = hinic3_adev_devices[svc_type].name; + hadev->adev.name = svc_name; + hadev->adev.id = hwdev->dev_id; + hadev->adev.dev.parent = hwdev->dev; + hadev->adev.dev.release = hinic3_comm_adev_release; + hadev->svc_type = svc_type; + hadev->hwdev = hwdev; + + ret = auxiliary_device_init(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed init adev %s %u\n", + svc_name, hwdev->dev_id); + kfree(hadev); + return NULL; + } + + ret = auxiliary_device_add(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed to add adev %s %u\n", + svc_name, hwdev->dev_id); + auxiliary_device_uninit(&hadev->adev); + return NULL; + } + + return hadev; +} + +static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + struct hinic3_adev *hadev; + int timeout; + bool state; + + timeout = read_poll_timeout(test_and_set_bit, state, !state, + HINIC3_EVENT_POLL_SLEEP_US, + HINIC3_EVENT_POLL_TIMEOUT_US, + false, svc_type, &pci_adapter->state); + + hadev = pci_adapter->hadev[svc_type]; + auxiliary_device_delete(&hadev->adev); + auxiliary_device_uninit(&hadev->adev); + pci_adapter->hadev[svc_type] = NULL; + if (!timeout) + clear_bit(svc_type, &pci_adapter->state); +} + +static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + enum hinic3_service_type svc_type; + + mutex_lock(&pci_adapter->pdev_mutex); + + for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) { + if (!hinic3_adev_svc_supported(hwdev, svc_type)) + continue; + + pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev, + svc_type); + if (!pci_adapter->hadev[svc_type]) + goto err_del_adevs; + } + mutex_unlock(&pci_adapter->pdev_mutex); + return 0; + +err_del_adevs: + while (svc_type > 0) { + svc_type--; + if (pci_adapter->hadev[svc_type]) { + hinic3_del_one_adev(hwdev, svc_type); + pci_adapter->hadev[svc_type] = NULL; + } + } + mutex_unlock(&pci_adapter->pdev_mutex); + return -ENOMEM; +} + +static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + int i; + + mutex_lock(&pci_adapter->pdev_mutex); + for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) { + if (pci_adapter->hadev[i]) + hinic3_del_one_adev(hwdev, i); + } + 
mutex_unlock(&pci_adapter->pdev_mutex); +} + +struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + return hadev->hwdev; +} + +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = event_handler; +} + +void hinic3_adev_event_unregister(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = NULL; +} + +static int hinic3_mapping_bar(struct pci_dev *pdev, + struct hinic3_pcidev *pci_adapter) +{ + pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, + HINIC3_VF_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HINIC3_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + dev_err(&pdev->dev, "Failed to map interrupt regs\n"); + goto err_unmap_cfg_reg_base; + } + + pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR); + if (!pci_adapter->db_base) { + dev_err(&pdev->dev, "Failed to map doorbell regs\n"); + goto err_unmap_intr_reg_base; + } + + return 0; + +err_unmap_intr_reg_base: + iounmap(pci_adapter->intr_reg_base); + +err_unmap_cfg_reg_base: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter) +{ + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int hinic3_pci_init(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) + return -ENOMEM; + + pci_adapter->pdev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + goto err_free_pci_adapter; + } + + err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to request regions\n"); + goto err_disable_device; + } + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_clear_master(pdev); + pci_release_regions(pdev); + +err_disable_device: + pci_disable_device(pdev); + +err_free_pci_adapter: + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); + + return err; +} + +static void hinic3_pci_uninit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); +} + +static int hinic3_func_init(struct pci_dev *pdev, + struct hinic3_pcidev *pci_adapter) +{ + int err; + + err = hinic3_init_hwdev(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to initialize hardware device\n"); + return err; + } + + err = 
hinic3_attach_aux_devices(pci_adapter->hwdev); + if (err) + goto err_free_hwdev; + + return 0; + +err_free_hwdev: + hinic3_free_hwdev(pci_adapter->hwdev); + + return err; +} + +static void hinic3_func_uninit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_detach_aux_devices(pci_adapter->hwdev); + hinic3_free_hwdev(pci_adapter->hwdev); +} + +static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + int err; + + err = hinic3_mapping_bar(pdev, pci_adapter); + if (err) { + dev_err(&pdev->dev, "Failed to map bar\n"); + goto err_out; + } + + err = hinic3_func_init(pdev, pci_adapter); + if (err) + goto err_unmap_bar; + + return 0; + +err_unmap_bar: + hinic3_unmapping_bar(pci_adapter); + +err_out: + dev_err(&pdev->dev, "PCIe device probe function failed\n"); + return err; +} + +static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + + hinic3_func_uninit(pdev); + hinic3_unmapping_bar(pci_adapter); +} + +static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + err = hinic3_pci_init(pdev); + if (err) + goto err_out; + + pci_adapter = pci_get_drvdata(pdev); + err = hinic3_probe_func(pci_adapter); + if (err) + goto err_uninit_pci; + + return 0; + +err_uninit_pci: + hinic3_pci_uninit(pdev); + +err_out: + dev_err(&pdev->dev, "PCIe device probe failed\n"); + return err; +} + +static void hinic3_remove(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_remove_func(pci_adapter); + hinic3_pci_uninit(pdev); +} + +static const struct pci_device_id hinic3_pci_table[] = { + /* Completed by later submission due to LoC limit. */ + {0, 0} + +}; + +MODULE_DEVICE_TABLE(pci, hinic3_pci_table); + +static void hinic3_shutdown(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_disable_device(pdev); + + if (pci_adapter) + hinic3_set_api_stop(pci_adapter->hwdev); +} + +static struct pci_driver hinic3_driver = { + .name = HINIC3_NIC_DRV_NAME, + .id_table = hinic3_pci_table, + .probe = hinic3_probe, + .remove = hinic3_remove, + .shutdown = hinic3_shutdown, + .sriov_configure = pci_sriov_configure_simple +}; + +int hinic3_lld_init(void) +{ + return pci_register_driver(&hinic3_driver); +} + +void hinic3_lld_exit(void) +{ + pci_unregister_driver(&hinic3_driver); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h new file mode 100644 index 000000000000..322b44803476 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_LLD_H_ +#define _HINIC3_LLD_H_ + +#include <linux/auxiliary_bus.h> + +struct hinic3_event_info; + +#define HINIC3_NIC_DRV_NAME "hinic3" + +int hinic3_lld_init(void); +void hinic3_lld_exit(void); +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)); +void hinic3_adev_event_unregister(struct auxiliary_device *adev); +struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c new file mode 100644 index 000000000000..093aa6d775ff --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic3_common.h" +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_lld.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver" + +#define HINIC3_RX_BUF_LEN 2048 +#define HINIC3_LRO_REPLENISH_THLD 256 +#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" + +#define HINIC3_SQ_DEPTH 1024 +#define HINIC3_RQ_DEPTH 1024 + +static int hinic3_alloc_txrxqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_alloc_txqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc txqs\n"); + return err; + } + + err = hinic3_alloc_rxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc rxqs\n"); + goto err_free_txqs; + } + + return 0; + +err_free_txqs: + hinic3_free_txqs(netdev); + + return err; +} + +static void hinic3_free_txrxqs(struct net_device *netdev) +{ + hinic3_free_rxqs(netdev); + hinic3_free_txqs(netdev); +} + +static int hinic3_init_nic_dev(struct net_device *netdev, + struct hinic3_hwdev *hwdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = hwdev->pdev; + + nic_dev->netdev = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev->hwdev = hwdev; + nic_dev->pdev = pdev; + + nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN; + nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD; + nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap; + + return 0; +} + +static int hinic3_sw_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; + nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; + + /* VF driver always uses random MAC address. During VM migration to a + * new device, the new device should learn the VMs old MAC rather than + * provide its own MAC. The product design assumes that every VF is + * suspectable to migration so the device avoids offering MAC address + * to VFs. 
+ */ + eth_hw_addr_random(netdev); + err = hinic3_set_mac(hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(hwdev)); + if (err) { + dev_err(hwdev->dev, "Failed to set default MAC\n"); + return err; + } + + err = hinic3_alloc_txrxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc qps\n"); + goto err_del_mac; + } + + return 0; + +err_del_mac: + hinic3_del_mac(hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(hwdev)); + + return err; +} + +static void hinic3_sw_uninit(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_free_txrxqs(netdev); + hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(nic_dev->hwdev)); +} + +static void hinic3_assign_netdev_ops(struct net_device *netdev) +{ + hinic3_set_netdev_ops(netdev); +} + +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t cso_fts = 0; + netdev_features_t tso_fts = 0; + netdev_features_t dft_fts; + + dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM)) + cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC)) + cso_fts |= NETIF_F_SCTP_CRC; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO)) + tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= dft_fts | cso_fts | tso_fts; +} + +static int hinic3_set_default_hw_feature(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_set_nic_feature_to_hw(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to set nic features\n"); + return err; + } + + return 0; +} + +static void hinic3_link_status_change(struct net_device *netdev, + bool link_status_up) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + if (link_status_up) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = true; + netif_carrier_on(netdev); + netdev_dbg(netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = false; + netif_carrier_off(netdev); + netdev_dbg(netdev, "Link is down\n"); + } +} + +static void hinic3_nic_event(struct auxiliary_device *adev, + struct hinic3_event_info *event) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + netdev = nic_dev->netdev; + + switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) { + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, + HINIC3_NIC_EVENT_LINK_UP): + hinic3_link_status_change(netdev, true); + break; + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, + HINIC3_NIC_EVENT_LINK_DOWN): + hinic3_link_status_change(netdev, false); + break; + default: + break; + } +} + +static int hinic3_nic_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev); + struct pci_dev *pdev = hwdev->pdev; + struct hinic3_nic_dev *nic_dev; + struct net_device *netdev; + u16 max_qps, glb_func_id; + int err; + + if (!hinic3_support_nic(hwdev)) { + dev_dbg(&adev->dev, "HW doesn't support nic\n"); + return 0; + } + + hinic3_adev_event_register(adev, hinic3_nic_event); + + glb_func_id = hinic3_global_func_id(hwdev); + err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC); + if (err) { + dev_err(&adev->dev, "Failed to reset function\n"); + goto err_unregister_adev_event; + } + + max_qps = 
hinic3_func_max_qnum(hwdev); + netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); + if (!netdev) { + dev_err(&adev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_unregister_adev_event; + } + + nic_dev = netdev_priv(netdev); + dev_set_drvdata(&adev->dev, nic_dev); + err = hinic3_init_nic_dev(netdev, hwdev); + if (err) + goto err_free_netdev; + + err = hinic3_init_nic_io(nic_dev); + if (err) + goto err_free_netdev; + + err = hinic3_sw_init(netdev); + if (err) + goto err_free_nic_io; + + hinic3_assign_netdev_ops(netdev); + + netdev_feature_init(netdev); + err = hinic3_set_default_hw_feature(netdev); + if (err) + goto err_uninit_sw; + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) + goto err_uninit_nic_feature; + + return 0; + +err_uninit_nic_feature: + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + +err_uninit_sw: + hinic3_sw_uninit(netdev); + +err_free_nic_io: + hinic3_free_nic_io(nic_dev); + +err_free_netdev: + free_netdev(netdev); + +err_unregister_adev_event: + hinic3_adev_event_unregister(adev); + dev_err(&pdev->dev, "NIC service probe failed\n"); + + return err; +} + +static void hinic3_nic_remove(struct auxiliary_device *adev) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + if (!hinic3_support_nic(nic_dev->hwdev)) + return; + + netdev = nic_dev->netdev; + unregister_netdev(netdev); + + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + hinic3_sw_uninit(netdev); + + hinic3_free_nic_io(nic_dev); + + free_netdev(netdev); +} + +static const struct auxiliary_device_id hinic3_nic_id_table[] = { + { + .name = HINIC3_NIC_DRV_NAME ".nic", + }, + {} +}; + +static struct auxiliary_driver hinic3_nic_driver = { + .probe = hinic3_nic_probe, + .remove = hinic3_nic_remove, + .suspend = NULL, + .resume = NULL, + .name = "nic", + .id_table = hinic3_nic_id_table, +}; + +static __init int hinic3_nic_lld_init(void) +{ + int err; + + pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC); + + err = hinic3_lld_init(); + if (err) + return err; + + err = auxiliary_driver_register(&hinic3_nic_driver); + if (err) { + hinic3_lld_exit(); + return err; + } + + return 0; +} + +static __exit void hinic3_nic_lld_exit(void) +{ + auxiliary_driver_unregister(&hinic3_nic_driver); + + hinic3_lld_exit(); +} + +module_init(hinic3_nic_lld_init); +module_exit(hinic3_nic_lld_exit); + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c new file mode 100644 index 000000000000..e74d1eb09730 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/dma-mapping.h> + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params) +{ + /* Completed by later submission due to LoC limit. 
*/ + return -EFAULT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h new file mode 100644 index 000000000000..d7a6c37b7eff --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MBOX_H_ +#define _HINIC3_MBOX_H_ + +#include <linux/bitfield.h> +#include <linux/mutex.h> + +struct hinic3_hwdev; + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h new file mode 100644 index 000000000000..4edabeb32112 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_H_ +#define _HINIC3_MGMT_H_ + +#include <linux/types.h> + +struct hinic3_hwdev; + +void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h new file mode 100644 index 000000000000..c4434efdc7f7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_INTERFACE_H_ +#define _HINIC3_MGMT_INTERFACE_H_ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/if_ether.h> + +#include "hinic3_hw_intf.h" + +struct l2nic_cmd_feature_nego { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[4]; +}; + +enum l2nic_func_tbl_cfg_bitmap { + L2NIC_FUNC_TBL_CFG_INIT = 0, + L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1, + L2NIC_FUNC_TBL_CFG_MTU = 2, +}; + +struct l2nic_func_tbl_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct l2nic_cmd_set_func_tbl { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd; + u32 cfg_bitmap; + struct l2nic_func_tbl_cfg tbl_cfg; +}; + +struct l2nic_cmd_set_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct l2nic_cmd_update_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct l2nic_cmd_force_pkt_drop { + struct mgmt_msg_head msg_head; + u8 port; + u8 rsvd1[3]; +}; + +/* Commands between NIC to fw */ +enum l2nic_cmd { + /* FUNC CFG */ + L2NIC_CMD_SET_FUNC_TBL = 5, + L2NIC_CMD_SET_VPORT_ENABLE = 6, + L2NIC_CMD_SET_SQ_CI_ATTR = 8, + L2NIC_CMD_CLEAR_QP_RESOURCE = 11, + L2NIC_CMD_FEATURE_NEGO = 15, + L2NIC_CMD_SET_MAC = 21, + L2NIC_CMD_DEL_MAC = 22, + L2NIC_CMD_UPDATE_MAC = 23, + L2NIC_CMD_CFG_RSS = 60, + L2NIC_CMD_CFG_RSS_HASH_KEY = 63, + L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64, + L2NIC_CMD_SET_RSS_CTX_TBL = 65, + L2NIC_CMD_QOS_DCB_STATE = 110, + L2NIC_CMD_FORCE_PKT_DROP = 113, + L2NIC_CMD_MAX = 256, +}; + +enum hinic3_nic_feature_cap { + HINIC3_NIC_F_CSUM = BIT(0), + HINIC3_NIC_F_SCTP_CRC = BIT(1), + HINIC3_NIC_F_TSO = BIT(2), + HINIC3_NIC_F_LRO = BIT(3), + HINIC3_NIC_F_UFO = BIT(4), + HINIC3_NIC_F_RSS = BIT(5), + HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6), + HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7), + 
HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8), + HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9), + HINIC3_NIC_F_FDIR = BIT(11), + HINIC3_NIC_F_PROMISC = BIT(12), + HINIC3_NIC_F_ALLMULTI = BIT(13), + HINIC3_NIC_F_RATE_LIMIT = BIT(16), +}; + +#define HINIC3_NIC_F_ALL_MASK 0x33bff +#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c new file mode 100644 index 000000000000..71104a6b8bef --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_open(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_close(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err; + + err = hinic3_set_port_mtu(netdev, new_mtu); + if (err) { + netdev_err(netdev, "Failed to change port mtu to %d\n", + new_mtu); + return err; + } + + netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + + return 0; +} + +static int hinic3_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) + return 0; + + err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr, + saddr->sa_data, 0, + hinic3_global_func_id(nic_dev->hwdev)); + + if (err) + return err; + + eth_hw_addr_set(netdev, saddr->sa_data); + + return 0; +} + +static const struct net_device_ops hinic3_netdev_ops = { + .ndo_open = hinic3_open, + .ndo_stop = hinic3_close, + .ndo_change_mtu = hinic3_change_mtu, + .ndo_set_mac_address = hinic3_set_mac_addr, + .ndo_start_xmit = hinic3_xmit_frame, +}; + +void hinic3_set_netdev_ops(struct net_device *netdev) +{ + netdev->netdev_ops = &hinic3_netdev_ops; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c new file mode 100644 index 000000000000..5b1a91a18c67 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include <linux/if_vlan.h> + +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode, + u64 *s_feature, u16 size) +{ + struct l2nic_cmd_feature_nego feature_nego = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + feature_nego.func_id = hinic3_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); + + mgmt_msg_params_init_default(&msg_params, &feature_nego, + sizeof(feature_nego)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FEATURE_NEGO, &msg_params); + if (err || feature_nego.msg_head.status) { + dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n", + err, feature_nego.msg_head.status); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); + + return 0; +} + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev) +{ + return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET, + &nic_dev->nic_io->feature_cap, 1); +} + +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits) +{ + return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits; +} + +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap) +{ + nic_dev->nic_io->feature_cap = feature_cap; +} + +static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap, + const struct l2nic_func_tbl_cfg *cfg) +{ + struct l2nic_cmd_set_func_tbl cmd_func_tbl = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + cmd_func_tbl.func_id = hinic3_global_func_id(hwdev); + cmd_func_tbl.cfg_bitmap = cfg_bitmap; + cmd_func_tbl.tbl_cfg = *cfg; + + mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl, + sizeof(cmd_func_tbl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_FUNC_TBL, &msg_params); + if (err || cmd_func_tbl.msg_head.status) { + dev_err(hwdev->dev, + "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n", + cfg_bitmap, err, cmd_func_tbl.msg_head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct l2nic_func_tbl_cfg func_tbl_cfg = {}; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + + func_tbl_cfg.mtu = new_mtu; + return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU), + &func_tbl_cfg); +} + +static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status, + u16 vlan_id) +{ + if ((status && status != MGMT_STATUS_EXIST) || + ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) { + return -EINVAL; + } + + return 0; +} + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_MAC, 
&msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to update MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + + if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) { + dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n"); + return 0; + } + + if (mac_info.msg_head.status == MGMT_STATUS_EXIST) { + dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} + +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_DEL_MAC, &msg_params); + if (err) { + dev_err(hwdev->dev, + "Failed to delete MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return err; + } + + return 0; +} + +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, + u8 *new_mac, u16 vlan_id, u16 func_id) +{ + struct l2nic_cmd_update_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.old_mac, old_mac); + ether_addr_copy(mac_info.new_mac, new_mac); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_UPDATE_MAC, &msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to update MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + return 0; +} + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev) +{ + struct l2nic_cmd_force_pkt_drop pkt_drop = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + pkt_drop.port = hinic3_physical_port_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FORCE_PKT_DROP, &msg_params); + if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED && + pkt_drop.msg_head.status) || err) { + dev_err(hwdev->dev, + "Failed to set force tx packets drop, err: %d, status: 0x%x\n", + err, pkt_drop.msg_head.status); + return -EFAULT; + } + + return pkt_drop.msg_head.status; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h new file mode 100644 index 000000000000..bf9ce51dc401 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
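hinic3_test_support(), shown near the top of hinic3_nic_cfg.c above, masks the negotiated capability word and compares the result against the full request, so one call can demand several features at once and succeeds only when the device offers all of them. A small illustrative check (the wrapper is hypothetical; the helper and the feature bits are the ones introduced in this series):

/* Hypothetical: only advertise TSO when checksum offload is also present. */
static bool example_can_enable_tso(struct hinic3_nic_dev *nic_dev)
{
        return hinic3_test_support(nic_dev,
                                   HINIC3_NIC_F_CSUM | HINIC3_NIC_F_TSO);
}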
*/ + +#ifndef _HINIC3_NIC_CFG_H_ +#define _HINIC3_NIC_CFG_H_ + +#include <linux/types.h> + +#include "hinic3_hw_intf.h" +#include "hinic3_mgmt_interface.h" + +struct hinic3_hwdev; +struct hinic3_nic_dev; + +#define HINIC3_MIN_MTU_SIZE 256 +#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC3_VLAN_ID_MASK 0x7FFF + +enum hinic3_nic_event_type { + HINIC3_NIC_EVENT_LINK_DOWN = 0, + HINIC3_NIC_EVENT_LINK_UP = 1, +}; + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev); +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits); +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap); + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu); + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, + u8 *new_mac, u16 vlan_id, u16 func_id); + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h new file mode 100644 index 000000000000..c994fc9b6ee0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_DEV_H_ +#define _HINIC3_NIC_DEV_H_ + +#include <linux/netdevice.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_mgmt_interface.h" + +enum hinic3_flags { + HINIC3_RSS_ENABLE, +}; + +enum hinic3_rss_hash_type { + HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1, +}; + +struct hinic3_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +struct hinic3_irq_cfg { + struct net_device *netdev; + u16 msix_entry_idx; + /* provided by OS */ + u32 irq_id; + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic3_txq *txq; + struct hinic3_rxq *rxq; +}; + +struct hinic3_dyna_txrxq_params { + u16 num_qps; + u32 sq_depth; + u32 rq_depth; + + struct hinic3_dyna_txq_res *txqs_res; + struct hinic3_dyna_rxq_res *rxqs_res; + struct hinic3_irq_cfg *irq_cfg; +}; + +struct hinic3_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct hinic3_hwdev *hwdev; + struct hinic3_nic_io *nic_io; + + u16 max_qps; + u16 rx_buf_len; + u32 lro_replenish_thld; + unsigned long flags; + struct hinic3_nic_service_cap nic_svc_cap; + + struct hinic3_dyna_txrxq_params q_params; + struct hinic3_txq *txqs; + struct hinic3_rxq *rxqs; + + u16 num_qp_irq; + struct msix_entry *qps_msix_entries; + + bool link_status_up; +}; + +void hinic3_set_netdev_ops(struct net_device *netdev); + +/* Temporary prototypes. Functions become static in later submission. */ +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg); +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c new file mode 100644 index 000000000000..34a1f5bd5ac1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include "hinic3_hw_comm.h" +#include "hinic3_hw_intf.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h new file mode 100644 index 000000000000..865ba6878c48 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_IO_H_ +#define _HINIC3_NIC_IO_H_ + +#include <linux/bitfield.h> + +#include "hinic3_wq.h" + +struct hinic3_nic_dev; + +#define HINIC3_SQ_WQEBB_SHIFT 4 +#define HINIC3_RQ_WQEBB_SHIFT 3 +#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT) + +/* ******************** RQ_CTRL ******************** */ +enum hinic3_rq_wqe_type { + HINIC3_NORMAL_RQ_WQE = 1, +}; + +/* ******************** SQ_CTRL ******************** */ +#define HINIC3_TX_MSS_DEFAULT 0x3E00 +#define HINIC3_TX_MSS_MIN 0x50 +#define HINIC3_MAX_SQ_SGE 18 + +struct hinic3_io_queue { + struct hinic3_wq wq; + u8 owner; + u16 q_id; + u16 msix_entry_idx; + u8 __iomem *db_addr; + u16 *cons_idx_addr; +} ____cacheline_aligned; + +static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->cons_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->prod_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask; +} + +/* ******************** DB INFO ******************** */ +#define DB_INFO_QID_MASK GENMASK(12, 0) +#define DB_INFO_CFLAG_MASK BIT(23) +#define DB_INFO_COS_MASK GENMASK(26, 24) +#define DB_INFO_TYPE_MASK GENMASK(31, 27) +#define DB_INFO_SET(val, member) \ + FIELD_PREP(DB_INFO_##member##_MASK, val) + +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_HI_SHIFT 8 +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi)) +#define DB_SRC_TYPE 1 + +/* CFLAG_DATA_PATH */ +#define DB_CFLAG_DP_SQ 0 +#define DB_CFLAG_DP_RQ 1 + +struct hinic3_nic_db { + u32 db_info; + u32 pi_hi; +}; + +static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, + u8 cflag, u16 pi) +{ + struct hinic3_nic_db db; + + db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) | + DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | + DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + + writeq(*((u64 *)&db), DB_ADDR(queue, pi)); +} + +struct hinic3_nic_io { + struct hinic3_io_queue *sq; + struct hinic3_io_queue *rq; + + u16 num_qps; + u16 max_qps; + + /* Base address for consumer index of all tx queues. Each queue is + * given a full cache line to hold its consumer index. HW updates + * current consumer index as it consumes tx WQEs. 
+ */ + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + u8 __iomem *sqs_db_addr; + u8 __iomem *rqs_db_addr; + + u16 rx_buf_len; + u64 feature_cap; +}; + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev); +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c new file mode 100644 index 000000000000..fab9011de9ad --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/device.h> + +#include "hinic3_hwdev.h" +#include "hinic3_queue_common.h" + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size) +{ + u32 elem_per_page; + + elem_per_page = min(page_size / elem_size, q_depth); + + qpages->pages = NULL; + qpages->page_size = page_size; + qpages->num_pages = max(q_depth / elem_per_page, 1); + qpages->elem_size_shift = ilog2(elem_size); + qpages->elem_per_pg_shift = ilog2(elem_per_page); +} + +static void __queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 pg_cnt) +{ + while (pg_cnt > 0) { + pg_cnt--; + hinic3_dma_free_coherent_align(hwdev->dev, + qpages->pages + pg_cnt); + } + kfree(qpages->pages); + qpages->pages = NULL; +} + +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages) +{ + __queue_pages_free(hwdev, qpages, qpages->num_pages); +} + +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align) +{ + u32 pg_idx; + int err; + + qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]), + GFP_KERNEL); + if (!qpages->pages) + return -ENOMEM; + + if (align == 0) + align = qpages->page_size; + + for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) { + err = hinic3_dma_zalloc_coherent_align(hwdev->dev, + qpages->page_size, + align, + GFP_KERNEL, + qpages->pages + pg_idx); + if (err) { + __queue_pages_free(hwdev, qpages, pg_idx); + return err; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h new file mode 100644 index 000000000000..ec4cae0a0929 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_QUEUE_COMMON_H_ +#define _HINIC3_QUEUE_COMMON_H_ + +#include <linux/types.h> + +#include "hinic3_common.h" + +struct hinic3_hwdev; + +struct hinic3_queue_pages { + /* Array of DMA-able pages that actually holds the queue entries. */ + struct hinic3_dma_addr_align *pages; + /* Page size in bytes. */ + u32 page_size; + /* Number of pages, must be power of 2. */ + u16 num_pages; + u8 elem_size_shift; + u8 elem_per_pg_shift; +}; + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size); +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align); +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages); + +/* Get pointer to queue entry at the specified index. Index does not have to be + * masked to queue depth, only least significant bits will be used. 
Also + * provides remaining elements in same page (including the first one) in case + * caller needs multiple entries. + */ +static inline void *get_q_element(const struct hinic3_queue_pages *qpages, + u32 idx, u32 *remaining_in_page) +{ + const struct hinic3_dma_addr_align *page; + u32 page_idx, elem_idx, elem_per_pg, ofs; + u8 shift; + + shift = qpages->elem_per_pg_shift; + page_idx = (idx >> shift) & (qpages->num_pages - 1); + elem_per_pg = 1 << shift; + elem_idx = idx & (elem_per_pg - 1); + if (remaining_in_page) + *remaining_in_page = elem_per_pg - elem_idx; + ofs = elem_idx << qpages->elem_size_shift; + page = qpages->pages + page_idx; + return (char *)page->align_vaddr + ofs; +} + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c new file mode 100644 index 000000000000..860163e9d66c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <net/gro.h> +#include <net/page_pool/helpers.h> + +#include "hinic3_hwdev.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" + +#define HINIC3_RX_HDR_SIZE 256 +#define HINIC3_RX_BUFFER_WRITE 16 + +#define HINIC3_RX_TCP_PKT 0x3 +#define HINIC3_RX_UDP_PKT 0x4 +#define HINIC3_RX_SCTP_PKT 0x7 + +#define HINIC3_RX_IPV4_PKT 0 +#define HINIC3_RX_IPV6_PKT 1 +#define HINIC3_RX_INVALID_IP_TYPE 2 + +#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0 +#define HINIC3_RX_PKT_FORMAT_VXLAN 1 + +#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66 +#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86 +#define HINIC3_LRO_PKT_HDR_LEN(cqe) \ + (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \ + HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \ + HINIC3_LRO_PKT_HDR_LEN_IPV4) + +int hinic3_alloc_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} + +static int rx_alloc_mapped_page(struct page_pool *page_pool, + struct hinic3_rx_info *rx_info, u16 buf_len) +{ + struct page *page; + u32 page_offset; + + page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len); + if (unlikely(!page)) + return -ENOMEM; + + rx_info->page = page; + rx_info->page_offset = page_offset; + + return 0; +} + +static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx, + dma_addr_t dma_addr, u16 len) +{ + struct hinic3_rq_wqe *rq_wqe; + + rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL); + rq_wqe->buf_hi_addr = upper_32_bits(dma_addr); + rq_wqe->buf_lo_addr = lower_32_bits(dma_addr); +} + +static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) +{ + u32 i, free_wqebbs = rxq->delta - 1; + struct hinic3_rx_info *rx_info; + dma_addr_t dma_addr; + int err; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + err = rx_alloc_mapped_page(rxq->page_pool, rx_info, + rxq->buf_len); + if (unlikely(err)) + break; + + dma_addr = page_pool_get_dma_addr(rx_info->page) + + rx_info->page_offset; + rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr, + rxq->buf_len); + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ, + rxq->next_to_update << HINIC3_NORMAL_RQ_WQE); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } + + return i; +} + +static void hinic3_add_rx_frag(struct hinic3_rxq *rxq, + struct hinic3_rx_info *rx_info, + struct sk_buff *skb, u32 size) +{ + struct page *page; + u8 *va; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + net_prefetch(va); + + page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset, + rxq->buf_len); + + if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + memcpy(__skb_put(skb, size), va, + ALIGN(size, sizeof(long))); + page_pool_put_full_page(rxq->page_pool, page, false); + + return; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_info->page_offset, size, rxq->buf_len); + skb_mark_for_recycle(skb); +} + +static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb, + u32 sge_num, u32 pkt_len) +{ + struct hinic3_rx_info *rx_info; + u32 temp_pkt_len = pkt_len; + u32 temp_sge_num = sge_num; + u32 sw_ci; + u32 size; + + sw_ci = rxq->cons_idx & rxq->q_mask; + while (temp_sge_num) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(temp_pkt_len > rxq->buf_len)) { + size = rxq->buf_len; + temp_pkt_len -= rxq->buf_len; + } else { + size = temp_pkt_len; + } + + hinic3_add_rx_frag(rxq, rx_info, skb, size); + + /* clear contents of buffer_info */ + rx_info->page = NULL; + temp_sge_num--; + } +} + +static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len) +{ + u32 sge_num; + + sge_num = pkt_len >> rxq->buf_len_shift; + sge_num += (pkt_len & (rxq->buf_len - 1)) ? 
1 : 0; + + return sge_num; +} + +static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq, + u32 pkt_len) +{ + struct sk_buff *skb; + u32 sge_num; + + skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + sge_num = hinic3_get_sge_num(rxq, pkt_len); + + net_prefetchw(skb->data); + packaging_skb(rxq, skb, sge_num, pkt_len); + + rxq->cons_idx += sge_num; + rxq->delta += sge_num; + + return skb; +} + +static void hinic3_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type, + u32 status, struct sk_buff *skb) +{ + u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT); + u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE); + u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE); + u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR); + struct net_device *netdev = rxq->netdev; + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(csum_err)) { + /* pkt type is recognized by HW, and csum is wrong */ + skb->ip_summed = CHECKSUM_NONE; + return; + } + + if (ip_type == HINIC3_RX_INVALID_IP_TYPE || + !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL || + pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + switch (pkt_type) { + case HINIC3_RX_TCP_PKT: + case HINIC3_RX_UDP_PKT: + case HINIC3_RX_SCTP_PKT: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } +} + +static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + __be16 proto; + + proto = __vlan_get_protocol(skb, eth->h_proto, NULL); + + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb), + num_lro); + skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ? 
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_segs = num_lro; +} + +static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) +{ + struct net_device *netdev = rxq->netdev; + struct sk_buff *skb; + u32 offload_type; + u16 num_lro; + + skb = hinic3_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) + return -ENOMEM; + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + hinic3_pull_tail(skb); + + offload_type = rx_cqe->offload_type; + hinic3_rx_csum(rxq, offload_type, status, skb); + + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + hinic3_lro_set_gso_params(skb, num_lro); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + + return 0; +} + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 sw_ci, status, pkt_len, vlan_len; + struct hinic3_rq_cqe *rx_cqe; + u32 num_wqe = 0; + int nr_pkts = 0; + u16 num_lro; + + while (likely(nr_pkts < budget)) { + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_cqe = rxq->cqe_arr + sw_ci; + status = rx_cqe->status; + if (!RQ_CQE_STATUS_GET(status, RXDONE)) + break; + + /* make sure we read rx_done before packet length */ + rmb(); + + vlan_len = rx_cqe->vlan_len; + pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN); + if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) + break; + + nr_pkts++; + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + num_wqe += hinic3_get_sge_num(rxq, pkt_len); + + rx_cqe->status = 0; + + if (num_wqe >= nic_dev->lro_replenish_thld) + break; + } + + if (rxq->delta >= HINIC3_RX_BUFFER_WRITE) + hinic3_rx_fill_buffers(rxq); + + return nr_pkts; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h new file mode 100644 index 000000000000..1cca21858d40 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_RX_H_ +#define _HINIC3_RX_H_ + +#include <linux/bitfield.h> +#include <linux/netdevice.h> + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0) +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5) +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8) +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21) +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val) + +#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0) +#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16) +#define RQ_CQE_SGE_GET(val, member) \ + FIELD_GET(RQ_CQE_SGE_##member##_MASK, val) + +#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0) +#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16) +#define RQ_CQE_STATUS_RXDONE_MASK BIT(31) +#define RQ_CQE_STATUS_GET(val, member) \ + FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val) + +/* RX Completion information that is provided by HW for a specific RX WQE */ +struct hinic3_rq_cqe { + u32 status; + u32 vlan_len; + u32 offload_type; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 pkt_info; +}; + +struct hinic3_rq_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct hinic3_rx_info { + struct page *page; + u32 page_offset; +}; + +struct hinic3_rxq { + struct net_device *netdev; + + u16 q_id; + u32 q_depth; + u32 q_mask; + + u16 buf_len; + u32 buf_len_shift; + + u32 cons_idx; + u32 delta; + + u32 irq_id; + u16 msix_entry_idx; + + /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is + * statically associated (by index) to a specific rq_wqe. + */ + struct hinic3_rq_cqe *cqe_arr; + struct hinic3_rx_info *rx_info; + struct page_pool *page_pool; + + struct hinic3_io_queue *rq; + + struct hinic3_irq_cfg *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + dma_addr_t cqe_start_paddr; +} ____cacheline_aligned; + +int hinic3_alloc_rxqs(struct net_device *netdev); +void hinic3_free_rxqs(struct net_device *netdev); + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c new file mode 100644 index 000000000000..ae08257dd1d2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
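Editorial illustration, not part of the patch: the RX completion handling in hinic3_rx.c is driven by the FIELD_GET()-based accessors declared in hinic3_rx.h above. For a CQE status word of 0x80030000 the masks decode to RXDONE = 1 (bit 31), NUM_LRO = 3 (bits 23:16) and CSUM_ERR = 0 (bits 15:0), which is the shape of the checks made in hinic3_rx_poll() and hinic3_rx_csum(). A minimal decoder under those definitions (the function name is hypothetical and assumes hinic3_rx.h and linux/printk.h are included):

static void example_decode_rq_cqe_status(u32 status)
{
	/* Same accessors hinic3_rx_poll() and hinic3_rx_csum() use. */
	u32 rx_done = RQ_CQE_STATUS_GET(status, RXDONE);
	u32 num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
	u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);

	pr_debug("rx_done=%u num_lro=%u csum_err=%u\n",
		 rx_done, num_lro, csum_err);
}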
+ +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <net/ip6_checksum.h> +#include <net/ipv6.h> +#include <net/netdev_queues.h> + +#include "hinic3_hwdev.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_tx.h" +#include "hinic3_wq.h" + +#define MIN_SKB_LEN 32 + +int hinic3_alloc_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + u16 q_id, num_txqs = nic_dev->max_qps; + struct pci_dev *pdev = nic_dev->pdev; + struct hinic3_txq *txq; + + if (!num_txqs) { + dev_err(hwdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->q_params.sq_depth; + txq->q_mask = nic_dev->q_params.sq_depth - 1; + txq->dev = &pdev->dev; + } + + return 0; +} + +void hinic3_free_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); +} + +static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = upper_32_bits(addr); + buf_descs->lo_addr = lower_32_bits(addr); + buf_descs->len = len; +} + +static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, + struct hinic3_txq *txq, + struct hinic3_tx_info *tx_info, + struct hinic3_sq_wqe_combo *wqe_combo) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dma_info *dma_info = tx_info->dma_info; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag; + u32 i, idx; + int err; + + dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) + return -EFAULT; + + dma_info[0].len = skb_headlen(skb); + + wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); + wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); + + wqe_desc->ctrl_len = dma_info[0].len; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &(skb_shinfo(skb)->frags[i]); + if (unlikely(i == wqe_combo->first_bds_num)) + buf_desc = wqe_combo->bds_sec2; + + idx = i + 1; + dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) { + err = -EFAULT; + goto err_unmap_page; + } + dma_info[idx].len = skb_frag_size(frag); + + hinic3_set_buf_desc(buf_desc, dma_info[idx].dma, + dma_info[idx].len); + buf_desc++; + } + + return 0; + +err_unmap_page: + while (idx > 1) { + idx--; + dma_unmap_page(&pdev->dev, dma_info[idx].dma, + dma_info[idx].len, DMA_TO_DEVICE); + } + dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, + DMA_TO_DEVICE); + return err; +} + +static void hinic3_tx_unmap_skb(struct net_device *netdev, + struct sk_buff *skb, + struct hinic3_dma_info *dma_info) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags;) { + i++; + dma_unmap_page(&pdev->dev, + dma_info[i].dma, + dma_info[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(&pdev->dev, dma_info[0].dma, + dma_info[0].len, DMA_TO_DEVICE); +} + +union hinic3_ip 
{ + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union hinic3_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum hinic3_l3_type { + HINIC3_L3_UNKNOWN = 0, + HINIC3_L3_IP6_PKT = 1, + HINIC3_L3_IP4_PKT_NO_CSUM = 2, + HINIC3_L3_IP4_PKT_CSUM = 3, +}; + +enum hinic3_l4_offload_type { + HINIC3_L4_OFFLOAD_DISABLE = 0, + HINIC3_L4_OFFLOAD_TCP = 1, + HINIC3_L4_OFFLOAD_STCP = 2, + HINIC3_L4_OFFLOAD_UDP = 3, +}; + +/* initialize l4 offset and offload */ +static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4, + u8 l4_proto, u32 *offset, + enum hinic3_l4_offload_type *l4_offload) +{ + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = HINIC3_L4_OFFLOAD_TCP; + /* To be same with TSO, payload offset begins from payload */ + *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) + + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = HINIC3_L4_OFFLOAD_UDP; + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + default: + *l4_offload = HINIC3_L4_OFFLOAD_DISABLE; + *offset = 0; + } +} + +static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, + struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->encapsulation) { + union hinic3_ip ip; + u8 l4_proto; + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + + ip.hdr = skb_network_header(skb); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else if (ip.v4->version == 6) { + union hinic3_l4 l4; + unsigned char *exthdr; + __be16 frag_off; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4.hdr = skb_transport_header(skb); + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + l4_proto = IPPROTO_RAW; + } + + if (l4_proto != IPPROTO_UDP || + ((struct udphdr *)skb_transport_header(skb))->dest != + VXLAN_OFFLOAD_PORT_LE) { + /* Unsupported tunnel packet, disable csum offload */ + skb_checksum_help(skb); + return 0; + } + } + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + + return 1; +} + +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, + union hinic3_l4 *l4, + enum hinic3_l3_type *l3_type, u8 *l4_proto) +{ + unsigned char *exthdr; + __be16 frag_off; + + if (ip->v4->version == 4) { + *l3_type = HINIC3_L3_IP4_PKT_CSUM; + *l4_proto = ip->v4->protocol; + } else if (ip->v4->version == 6) { + *l3_type = HINIC3_L3_IP6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + ipv6_skip_exthdr(skb, exthdr - skb->data, + l4_proto, &frag_off); + } + } else { + *l3_type = HINIC3_L3_UNKNOWN; + *l4_proto = 0; + } +} + +static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, + enum hinic3_l4_offload_type l4_offload, + u32 offset, u32 mss) +{ + if (l4_offload == HINIC3_L4_OFFLOAD_TCP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + } + + /* enable L3 calculation */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); + + /* set MSS value */ + *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); +} + +static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) +{ + return (ip->v4->version == 4) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + enum hinic3_l4_offload_type l4_offload; + enum hinic3_l3_type l3_type; + union hinic3_ip ip; + union hinic3_l4 l4; + u8 l4_proto; + u32 offset; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (skb->encapsulation) { + u32 gso_type = skb_shinfo(skb)->gso_type; + /* L3 checksum is always enabled */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + + l4.hdr = skb_transport_header(skb); + ip.hdr = skb_network_header(skb); + + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN); + } + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + } + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); + + get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); + + hinic3_set_tso_info(task, queue_info, l4_offload, offset, + skb_shinfo(skb)->gso_size); + + return 1; +} + +static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task, + u16 vlan_tag, u8 vlan_tpid) +{ + /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU + * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU, + * 4=select TPID4 in IPSU + */ + task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | + SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID); +} + +static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, + u32 *queue_info, struct hinic3_txq *txq) +{ + u32 offload = 0; + int tso_cs_en; + + task->pkt_info0 = 0; + task->ip_identify = 0; + task->rsvd = 0; + task->vlan_offload = 0; + + tso_cs_en = hinic3_tso(task, queue_info, skb); + if (tso_cs_en < 0) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en) { + offload |= HINIC3_TX_OFFLOAD_TSO; + } else { + tso_cs_en = hinic3_tx_csum(txq, task, skb); + if (tso_cs_en) + offload |= HINIC3_TX_OFFLOAD_CSUM; + } + +#define VLAN_INSERT_MODE_MAX 5 + if (unlikely(skb_vlan_tag_present(skb))) { + /* select vlan insert mode by qid, default 802.1Q Tag type */ + hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb), + txq->q_id % VLAN_INSERT_MODE_MAX); + offload |= HINIC3_TX_OFFLOAD_VLAN; + } + + if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > + SQ_CTRL_MAX_PLDOFF)) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } + + return offload; +} + +static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq, + u16 curr_pi, u16 wqebb_cnt) +{ + u16 owner = sq->owner; + + if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = !sq->owner; + + return owner; +} + +static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq, + struct hinic3_sq_wqe_combo *wqe_combo, + u32 offload, u16 num_sge, u16 *curr_pi) +{ + struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs; + u16 first_part_wqebbs_num, tmp_pi; + + wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi); + if (!offload && num_sge == 1) { + wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1); + } 
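+ /* Offload metadata or multiple SGEs need the extended WQE format: an
+  * optional 16-byte task section plus one buffer descriptor per extra
+  * SGE, taken from the work queue below.
+  */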
+ + wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; + + if (offload) { + wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq, + &tmp_pi); + wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; + } else { + wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS; + } + + if (num_sge > 1) { + /* first wqebb contain bd0, and bd size is equal to sq wqebb + * size, so we use (num_sge - 1) as wanted weqbb_cnt + */ + hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi, + &first_part_wqebbs, + &second_part_wqebbs, + &first_part_wqebbs_num); + wqe_combo->bds_head = first_part_wqebbs; + wqe_combo->bds_sec2 = second_part_wqebbs; + wqe_combo->first_bds_num = first_part_wqebbs_num; + } + + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, + num_sge + !!offload); +} + +static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo, + u32 queue_info, int nr_descs, u16 owner) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + /* compact wqe queue_info will transfer to chip */ + wqe_desc->queue_info = 0; + return; + } + + wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->queue_info = queue_info; + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < + HINIC3_TX_MSS_MIN) { + /* mss should not be less than 80 */ + wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS); + } +} + +static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic3_txq *txq) +{ + struct hinic3_sq_wqe_combo wqe_combo = {}; + struct hinic3_tx_info *tx_info; + struct hinic3_txq *tx_q = txq; + u32 offload, queue_info = 0; + struct hinic3_sq_task task; + u16 wqebb_cnt, num_sge; + u16 saved_wq_prod_idx; + u16 owner, pi = 0; + u8 saved_sq_owner; + int err; + + if (unlikely(skb->len < MIN_SKB_LEN)) { + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) + goto err_out; + + skb->len = MIN_SKB_LEN; + } + + num_sge = skb_shinfo(skb)->nr_frags + 1; + /* assume normal wqe format + 1 wqebb for task info */ + wqebb_cnt = num_sge + 1; + + if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) { + if (likely(wqebb_cnt > txq->tx_stop_thrs)) + txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs); + + netif_subqueue_try_stop(netdev, tx_q->sq->q_id, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_start_thrs); + + return NETDEV_TX_BUSY; + } + + offload = hinic3_tx_offload(skb, &task, &queue_info, txq); + if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) { + goto err_drop_pkt; + } else if (!offload) { + wqebb_cnt -= 1; + if (unlikely(num_sge == 1 && + skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN)) + goto err_drop_pkt; + } + + saved_wq_prod_idx = txq->sq->wq.prod_idx; + saved_sq_owner = txq->sq->owner; + + owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi); + if (offload) + *wqe_combo.task = task; + + tx_info = &txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + + err = 
hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo); + if (err) { + /* Rollback work queue to reclaim the wqebb we did not use */ + txq->sq->wq.prod_idx = saved_wq_prod_idx; + txq->sq->owner = saved_sq_owner; + goto err_drop_pkt; + } + + netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id), + skb->len); + netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_stop_thrs, + tx_q->tx_start_thrs); + + hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); + hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ, + hinic3_get_sq_local_pi(txq->sq)); + + return NETDEV_TX_OK; + +err_drop_pkt: + dev_kfree_skb_any(skb); + +err_out: + return NETDEV_TX_OK; +} + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) + goto err_drop_pkt; + + if (unlikely(q_id >= nic_dev->q_params.num_qps)) + goto err_drop_pkt; + + return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]); + +err_drop_pkt: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq) +{ + u16 sw_pi, hw_ci; + + sw_pi = hinic3_get_sq_local_pi(sq); + hw_ci = hinic3_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000 +#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000 +static int hinic3_stop_sq(struct hinic3_txq *txq) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev); + int err, rc; + + err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc, + is_hw_complete_sq_process(txq->sq) || rc, + HINIC3_FLUSH_QUEUE_POLL_SLEEP_US, + HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US, + true, nic_dev->hwdev); + if (rc) + return rc; + else + return err; +} + +/* packet transmission should be stopped before calling this function */ +void hinic3_flush_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + err = hinic3_stop_sq(&nic_dev->txqs[qid]); + netdev_tx_reset_subqueue(netdev, qid); + if (err) + netdev_err(netdev, "Failed to stop sq%u\n", qid); + } +} + +#define HINIC3_BDS_PER_SQ_WQEBB \ + (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc)) + +bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) +{ + struct net_device *netdev = txq->netdev; + u16 hw_ci, sw_ci, q_id = txq->sq->q_id; + struct hinic3_tx_info *tx_info; + struct hinic3_txq *tx_q = txq; + unsigned int bytes_compl = 0; + unsigned int pkts = 0; + u16 wqebb_cnt = 0; + + hw_ci = hinic3_get_sq_hw_ci(txq->sq); + dma_rmb(); + sw_ci = hinic3_get_sq_local_ci(txq->sq); + + do { + tx_info = &txq->tx_info[sw_ci]; + + /* Did all wqebb of this wqe complete? 
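+ * HW advances a single consumer index, so a WQE that spans several
+ * WQEBBs can only be reclaimed once the whole span is behind hw_ci.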
*/ + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + net_prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + bytes_compl += tx_info->skb->len; + pkts++; + + hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info); + napi_consume_skb(tx_info->skb, budget); + tx_info->skb = NULL; + } while (likely(pkts < HINIC3_TX_POLL_WEIGHT)); + + hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt); + + netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_start_thrs); + + return pkts == HINIC3_TX_POLL_WEIGHT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h new file mode 100644 index 000000000000..9e505cc19dd5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_TX_H_ +#define _HINIC3_TX_H_ + +#include <linux/bitops.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/netdevice.h> +#include <net/checksum.h> + +#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789) +#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data) + +#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383 +#define HINIC3_TX_POLL_WEIGHT 64 +#define HINIC3_DEFAULT_STOP_THRS 6 +#define HINIC3_DEFAULT_START_THRS 24 + +enum sq_wqe_data_format { + SQ_NORMAL_WQE = 0, +}; + +enum sq_wqe_ec_type { + SQ_WQE_COMPACT_TYPE = 0, + SQ_WQE_EXTENDED_TYPE = 1, +}; + +enum sq_wqe_tasksect_len_type { + SQ_WQE_TASKSECT_46BITS = 0, + SQ_WQE_TASKSECT_16BYTES = 1, +}; + +enum hinic3_tx_offload_type { + HINIC3_TX_OFFLOAD_TSO = BIT(0), + HINIC3_TX_OFFLOAD_CSUM = BIT(1), + HINIC3_TX_OFFLOAD_VLAN = BIT(2), + HINIC3_TX_OFFLOAD_INVALID = BIT(3), + HINIC3_TX_OFFLOAD_ESP = BIT(4), +}; + +#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19) +#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27) +#define SQ_CTRL_DATA_FORMAT_MASK BIT(28) +#define SQ_CTRL_EXTENDED_MASK BIT(30) +#define SQ_CTRL_OWNER_MASK BIT(31) +#define SQ_CTRL_SET(val, member) \ + FIELD_PREP(SQ_CTRL_##member##_MASK, val) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2) +#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10) +#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11) +#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13) +#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28) + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) + +#define SQ_CTRL_MAX_PLDOFF 221 + +#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19) +#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24) +#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25) +#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27) +#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28) +#define SQ_TASK_INFO0_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val) + +#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0) +#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16) +#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19) +#define SQ_TASK_INFO3_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val) + +struct hinic3_sq_wqe_desc { + u32 ctrl_len; + u32 queue_info; + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic3_sq_task { + u32 pkt_info0; + u32 ip_identify; + u32 rsvd; + u32 vlan_offload; +}; + +struct 
hinic3_sq_wqe_combo { + struct hinic3_sq_wqe_desc *ctrl_bd0; + struct hinic3_sq_task *task; + struct hinic3_sq_bufdesc *bds_head; + struct hinic3_sq_bufdesc *bds_sec2; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +struct hinic3_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct hinic3_tx_info { + struct sk_buff *skb; + u16 wqebb_cnt; + struct hinic3_dma_info *dma_info; +}; + +struct hinic3_txq { + struct net_device *netdev; + struct device *dev; + + u16 q_id; + u16 tx_stop_thrs; + u16 tx_start_thrs; + u32 q_mask; + u32 q_depth; + + struct hinic3_tx_info *tx_info; + struct hinic3_io_queue *sq; +} ____cacheline_aligned; + +int hinic3_alloc_txqs(struct net_device *netdev); +void hinic3_free_txqs(struct net_device *netdev); + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool hinic3_tx_poll(struct hinic3_txq *txq, int budget); +void hinic3_flush_txqs(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c new file mode 100644 index 000000000000..2ac7efcd1365 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/dma-mapping.h> + +#include "hinic3_hwdev.h" +#include "hinic3_wq.h" + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_part_wqebbs, + struct hinic3_sq_bufdesc **second_part_wqebbs, + u16 *first_part_wqebbs_num) +{ + u32 idx, remaining; + + idx = wq->prod_idx & wq->idx_mask; + wq->prod_idx += num_wqebbs; + *prod_idx = idx; + *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining); + if (likely(remaining >= num_wqebbs)) { + *first_part_wqebbs_num = num_wqebbs; + *second_part_wqebbs = NULL; + } else { + *first_part_wqebbs_num = remaining; + idx += remaining; + *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL); + } +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h new file mode 100644 index 000000000000..ab37893efd7e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_WQ_H_ +#define _HINIC3_WQ_H_ + +#include <linux/io.h> + +#include "hinic3_queue_common.h" + +struct hinic3_sq_bufdesc { + /* 31-bits Length, L2NIC only uses length[17:0] */ + u32 len; + u32 rsvd; + u32 hi_addr; + u32 lo_addr; +}; + +/* Work queue is used to submit elements (tx, rx, cmd) to hw. + * Driver is the producer that advances prod_idx. cons_idx is advanced when + * HW reports completions of previously submitted elements. + */ +struct hinic3_wq { + struct hinic3_queue_pages qpages; + /* Unmasked producer/consumer indices that are advanced to natural + * integer overflow regardless of queue depth. + */ + u16 cons_idx; + u16 prod_idx; + + u32 q_depth; + u16 idx_mask; + + /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical + * Address (CLA) using 1 of 2 levels: + * level 0 - direct mapping of single wq page + * level 1 - indirect mapping of multiple pages via additional page + * table. + * When wq uses level 1, wq_block will hold the allocated indirection + * table. 
+ */ + dma_addr_t wq_block_paddr; + __be64 *wq_block_vaddr; +} ____cacheline_aligned; + +/* Get number of elements in work queue that are in-use. */ +static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq) +{ + return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx); +} + +static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq) +{ + /* Don't allow queue to become completely full, report (free - 1). */ + return wq->q_depth - hinic3_wq_get_used(wq) - 1; +} + +static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi) +{ + *pi = wq->prod_idx & wq->idx_mask; + wq->prod_idx++; + return get_q_element(&wq->qpages, *pi, NULL); +} + +static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs) +{ + wq->cons_idx += num_wqebbs; +} + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_part_wqebbs, + struct hinic3_sq_bufdesc **second_part_wqebbs, + u16 *first_part_wqebbs_num); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 22371011c249..2410aee59fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) */ if (!primary_lag) { lag->primary = true; + if (!ice_is_switchdev_running(lag->pf)) + return; + /* Configure primary's SWID to be shared */ ice_lag_primary_swid(lag, true); primary_lag = lag; } else { u16 swid; + if (!ice_is_switchdev_running(primary_lag->pf)) + return; + swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b147f6cf2615..eeeb9968e477 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -4322,7 +4322,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) } ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; } diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 124fd6f0b286..1e812c3f62f9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -143,6 +143,7 @@ enum idpf_vport_state { * @vport_id: Vport identifier * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index + * @max_tx_hdr_size: Max header length hardware can support * @state: See enum idpf_vport_state * @netstats: Packet and byte stats * @stats_lock: Lock to protect stats update @@ -153,6 +154,7 @@ struct idpf_netdev_priv { u32 vport_id; u32 link_speed_mbps; u16 vport_idx; + u16 max_tx_hdr_size; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; spinlock_t stats_lock; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 62a9c406a269..bab12ecb2df5 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -708,6 +708,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->vport = vport; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); vport->netdev = netdev; return idpf_init_mac_addr(vport, netdev); @@ -725,6 +726,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->adapter = adapter; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; 
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); spin_lock_init(&np->stats_lock); @@ -2188,8 +2190,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_adapter *adapter = vport->adapter; + struct idpf_netdev_priv *np = netdev_priv(netdev); + u16 max_tx_hdr_size = np->max_tx_hdr_size; size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -2212,7 +2214,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, goto unsupported; len = skb_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; if (!skb->encapsulation) @@ -2225,7 +2227,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, /* IPLEN can support at most 127 dwords */ len = skb_inner_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; /* No need to validate L4LEN as TCP is the only protocol with a diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 127560bebded..631679cdaa6f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -4180,6 +4180,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) return budget; } + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that. + */ + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) + return budget; + work_done = min_t(int, work_done, budget - 1); /* Exit the polling mode, but don't re-enable interrupts if stack might @@ -4190,15 +4198,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) else idpf_vport_intr_set_wb_on_itr(q_vector); - /* Switch to poll mode in the tear-down path after sending disable - * queues virtchnl message, as the interrupts will be disabled after - * that - */ - if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, - q_vector->tx[0]))) - return budget; - else - return work_done; + return work_done; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 648a7c618cd1..d1f4073b36f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -9,7 +9,7 @@ #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); +static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs); /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers @@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (r->used) { if (r->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(r->xs); + ixgbe_ipsec_del_sa(adapter->netdev, r->xs); else ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, r->key, r->salt, @@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (t->used) { if (t->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(t->xs); + ixgbe_ipsec_del_sa(adapter->netdev, t->xs); else ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); } @@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, /** * ixgbe_ipsec_parse_proto_keys - find the key 
and salt based on the protocol + * @dev: pointer to net device * @xs: pointer to xfrm_state struct * @mykey: pointer to key array to populate * @mysalt: pointer to salt value to populate @@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. **/ -static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, +static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev, + struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -473,11 +474,12 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, /** * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters + * @dev: pointer to net device * @xs: pointer to transformer state struct **/ -static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) +static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev, + struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; @@ -556,13 +558,14 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) /** * ixgbe_ipsec_add_sa - program device with a security association + * @dev: pointer to device to program * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ -static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, +static int ixgbe_ipsec_add_sa(struct net_device *dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { - struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; @@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, return -EINVAL; } - if (ixgbe_ipsec_check_mgmt_ip(xs)) { + if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) { NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters"); return -EINVAL; } @@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ - ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; @@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; - ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); @@ -752,11 +755,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, /** * ixgbe_ipsec_del_sa - clear out this specific SA + * @dev: pointer to device to program * @xs: pointer to transformer state struct **/ -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) +static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; @@ -841,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) continue; if (ipsec->rx_tbl[i].mode & 
IXGBE_RXTXMOD_VF && ipsec->rx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); + ixgbe_ipsec_del_sa(adapter->netdev, + ipsec->rx_tbl[i].xs); } /* search tx sa table */ @@ -850,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) continue; if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && ipsec->tx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); + ixgbe_ipsec_del_sa(adapter->netdev, + ipsec->tx_tbl[i].xs); } } @@ -930,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); /* set up the HW offload */ - err = ixgbe_ipsec_add_sa(xs, NULL); + err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL); if (err) goto err_aead; @@ -1034,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) xs = ipsec->tx_tbl[sa_idx].xs; } - ixgbe_ipsec_del_sa(xs); + ixgbe_ipsec_del_sa(adapter->netdev, xs); /* remove the xs that was made-up in the add request */ kfree_sensitive(xs); diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 8ba037e3d9c2..65580b9cb06f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, /** * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @dev: pointer to net device to program * @xs: pointer to xfrm_state struct * @mykey: pointer to key array to populate * @mysalt: pointer to salt value to populate @@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. 
**/ -static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, +static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev, + struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, /** * ixgbevf_ipsec_add_sa - program device with a security association + * @dev: pointer to net device to program * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ -static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, +static int ixgbevf_ipsec_add_sa(struct net_device *dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { - struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; @@ -310,7 +312,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ - ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key, + &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; @@ -363,7 +366,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; - ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key, + &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); @@ -388,11 +392,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, /** * ixgbevf_ipsec_del_sa - clear out this specific SA + * @dev: pointer to net device to program * @xs: pointer to transformer state struct **/ -static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) +static void ixgbevf_ipsec_del_sa(struct net_device *dev, + struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 511eb5b2a2d4..19a5f0da4c7f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; - if (blktype == BLKTYPE_NIX) - rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; @@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (lf < 0) /* This should never happen */ continue; + if (blktype == BLKTYPE_NIX) { + rvu_nix_reset_mac(pfvf, pcifunc); + rvu_npc_clear_ucast_entry(rvu, pcifunc, lf); + } /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 147d7f5c1fcc..48f66292ad5c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -994,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf); + bool is_npc_intf_tx(u8 intf); bool 
is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..4a3370a40dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. 
Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a1f9ec03c2ce..c827da626471 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 6296a3cdabbb..da15bb451178 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1107,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; @@ -1115,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, return; /* Ucast MCAM match entry of this PF/VF */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), + pfvf->nix_rx_intf)) { + 
index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + } /* Nothing to do for VFs, on platforms where pkt replication * is not supported @@ -3570,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, return 0; } + +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int ucast_idx, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); + + npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); + + npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (rule->entry == ucast_idx) { + list_del(&rule->list); + kfree(rule); + break; + } + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 5f69380ccc82..19e0d16b12f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -12,3 +12,4 @@ EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status); +EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index db02b4d6ed4b..4cd0fc4b0d20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -133,6 +133,32 @@ TRACE_EVENT(otx2_msg_status, __get_str(str), __entry->num_msgs) ); +TRACE_EVENT(otx2_parse_dump, + TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word), + TP_ARGS(pdev, msg, word), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, w0) + __field(u64, w1) + __field(u64, w2) + __field(u64, w3) + __field(u64, w4) + __field(u64, w5) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->w0 = *(word + 0); + __entry->w1 = *(word + 1); + __entry->w2 = *(word + 2); + __entry->w3 = *(word + 3); + __entry->w4 = *(word + 4); + __entry->w5 = *(word + 5); + ), + TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n", + __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2, + __entry->w3, __entry->w4, __entry->w5) +); + #endif /* __RVU_TRACE_H */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index fc59e50bafce..a6500e3673f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, return -EOPNOTSUPP; } -static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, +static int cn10k_ipsec_outb_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, if (err) return err; - pf = 
netdev_priv(netdev); + pf = netdev_priv(dev); err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); if (err) @@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, return 0; } -static int cn10k_ipsec_add_state(struct xfrm_state *x, +static int cn10k_ipsec_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return cn10k_ipsec_inb_add_state(x, extack); else - return cn10k_ipsec_outb_add_state(x, extack); + return cn10k_ipsec_outb_add_state(dev, x, extack); } -static void cn10k_ipsec_del_state(struct xfrm_state *x) +static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); sa_info = (struct qmem *)x->xso.offload_handle; sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; @@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) err = cn10k_outb_write_sa(pf, sa_info); if (err) - netdev_err(netdev, "Error (%d) deleting SA\n", err); + netdev_err(dev, "Error (%d) deleting SA\n", err); x->xso.offload_handle = 0; qmem_free(pf->dev, sa_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 9593627b35a3..99ace381cc78 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; + u64 *word = (u64 *)parse; void *end, *start; u32 metasize = 0; u64 *seg_addr; @@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, int seg; if (unlikely(parse->errlev || parse->errcode)) { - if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) + if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) { + trace_otx2_parse_dump(pfvf->pdev, "Err:", word); return; + } } + trace_otx2_parse_dump(pfvf->pdev, "", word); if (pfvf->xdp_prog) if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index ba4ae6d9c569..8a8b598bd389 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -727,9 +727,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } #ifdef CONFIG_DCB - err = otx2_dcbnl_set_ops(netdev); - if (err) - goto err_free_zc_bmap; + /* Priority flow control is not supported for LBK and SDP vf(s) */ + if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) { + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_free_zc_bmap; + } #endif otx2_qos_init(vf, qos_txqs); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 2d4b9964d3db..6f72a8c8ae1e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1178,7 +1178,7 @@ struct mtk_reg_map { }; /* struct mtk_eth_data - This is the structure holding all differences - * among various plaforms + * among various platforms * @reg_map Soc register map. 
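Editor's note: the otx2_parse_dump tracepoint added in rvu_trace.h above records the first six 64-bit words of the NIX RX parse header, and otx2_rcv_pkt_handler() now emits it for both error and normal completions. A small userspace sketch of the same view-the-header-as-raw-words idea; struct parse_hdr, dump_parse_words() and the dummy payload are stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct nix_rx_parse_s: only its size matters here. */
struct parse_hdr {
	uint8_t raw[6 * sizeof(uint64_t)];
};

/* Mirror of the tracepoint payload: six consecutive u64 words read from
 * the start of the parse header.
 */
static void dump_parse_words(const char *tag, const struct parse_hdr *p)
{
	uint64_t w[6];

	memcpy(w, p->raw, sizeof(w));	/* avoids aliasing the struct directly */
	printf("nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
	       tag,
	       (unsigned long long)w[0], (unsigned long long)w[1],
	       (unsigned long long)w[2], (unsigned long long)w[3],
	       (unsigned long long)w[4], (unsigned long long)w[5]);
}

int main(void)
{
	struct parse_hdr hdr = { .raw = { 0xab, 0xcd } };	/* dummy data */

	dump_parse_words("Err:", &hdr);
	return 0;
}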
* @ana_rgc3: The offset for register ANA_RGC3 related to * sgmiisys syscon @@ -1278,7 +1278,7 @@ struct mtk_soc_data { * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring * @state: Initialization and runtime state of the device - * @soc: Holding specific data among vaious SoCs + * @soc: Holding specific data among various SoCs */ struct mtk_eth { diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index e212a4ba9275..351dd152f4f3 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -2000,7 +2000,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) if (mtk_wed_is_v3_or_greater(dev->hw)) wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); - /* initail tx interrupt trigger */ + /* initial tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | @@ -2011,7 +2011,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, dev->wlan.tx_tbit[1])); - /* initail txfree interrupt trigger */ + /* initial txfree interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 81523825faa2..cb972b2d46e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv) int err = 0; rtnl_lock(); + netdev_lock(priv->netdev); mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) @@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv) out: mutex_unlock(&priv->state_lock); + netdev_unlock(priv->netdev); rtnl_unlock(); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 131ed97ca997..5d0014129a7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -8,6 +8,7 @@ #include "en/fs_tt_redirect.h" #include <linux/list.h> #include <linux/spinlock.h> +#include <net/netdev_lock.h> struct mlx5e_ptp_fs { struct mlx5_flow_handle *l2_rule; @@ -449,8 +450,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work) { struct mlx5e_ptpsq *ptpsq = container_of(work, struct mlx5e_ptpsq, report_unhealthy_work); + struct mlx5e_txqsq *sq = &ptpsq->txqsq; + + /* Recovering the PTP SQ means re-enabling NAPI, which requires the + * netdev instance lock. However, SQ closing has to wait for this work + * task to finish while also holding the same lock. So either get the + * lock or find that the SQ is no longer enabled and thus this work is + * not relevant anymore. 
+ */ + while (!netdev_trylock(sq->netdev)) { + if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)) + return; + msleep(20); + } mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq); + netdev_unlock(sq->netdev); } static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn, @@ -892,7 +907,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, if (err) goto err_free; - netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll); + netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll); mlx5e_ptp_build_params(c, cparams, params); @@ -910,7 +925,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, return 0; err_napi_del: - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); err_free: kvfree(cparams); kvfree(c); @@ -920,7 +935,7 @@ err_free: void mlx5e_ptp_close(struct mlx5e_ptp *c) { mlx5e_ptp_close_queues(c); - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); kvfree(c); } @@ -929,7 +944,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c) { int tc; - napi_enable(&c->napi); + napi_enable_locked(&c->napi); if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { for (tc = 0; tc < c->num_tc; tc++) @@ -957,7 +972,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c) mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); } - napi_disable(&c->napi); + napi_disable_locked(&c->napi); } int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index dbd9482359e1..c3bda4612fa9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -107,9 +107,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) mlx5e_reset_txqsq_cc_pc(sq); sq->stats->recover++; clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); - rtnl_lock(); mlx5e_activate_txqsq(sq); - rtnl_unlock(); if (sq->channel) mlx5e_trigger_napi_icosq(sq->channel); @@ -176,7 +174,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) priv = ptpsq->txqsq.priv; - rtnl_lock(); mutex_lock(&priv->state_lock); chs = &priv->channels; netdev = priv->netdev; @@ -196,7 +193,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) netif_carrier_on(netdev); mutex_unlock(&priv->state_lock); - rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 140606fcd23b..b5c19396e096 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -149,7 +149,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); t->stats = &priv->trap_stats.ch; - netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll); + netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll); err = mlx5e_open_trap_rq(priv, t); if (unlikely(err)) @@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) err_close_trap_rq: mlx5e_close_trap_rq(&t->rq); err_napi_del: - netif_napi_del(&t->napi); + netif_napi_del_locked(&t->napi); kvfree(t); return ERR_PTR(err); } @@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap) { mlx5e_tir_destroy(&trap->tir); mlx5e_close_trap_rq(&trap->rq); - netif_napi_del(&trap->napi); + netif_napi_del_locked(&trap->napi); kvfree(trap); } static void mlx5e_activate_trap(struct mlx5e_trap *trap) { - napi_enable(&trap->napi); + 
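Editor's note: the mlx5e_ptpsq_unhealthy_work() hunk above, and mlx5e_tx_err_cqe_work()/mlx5e_tx_timeout_work() later in this diff, all switch to the same shape: spin on netdev_trylock(), and bail out if the SQ/channel state bit has been cleared, because the closing path holds the instance lock while flushing this work. A compressed sketch of that control flow with stubbed primitives; fake_trylock(), fake_still_enabled() and friends are placeholders, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for netdev_trylock(), test_bit() and the unlock;
 * only the control flow of the recovery work matters here.
 */
static bool fake_trylock(void)       { return true; }
static bool fake_still_enabled(void) { return true; }
static void fake_unlock(void)        { }
static void fake_recover(void)       { puts("recovering queue"); }

static void recovery_work(void)
{
	/* Either take the instance lock, or notice that the queue was
	 * disabled (i.e. is being closed) and give up: the closing path
	 * holds the lock while waiting for this work to finish.
	 */
	while (!fake_trylock()) {
		if (!fake_still_enabled())
			return;
		/* the driver sleeps 20 ms (msleep(20)) before retrying */
	}

	fake_recover();
	fake_unlock();
}

int main(void)
{
	recovery_work();
	return 0;
}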
napi_enable_locked(&trap->napi); mlx5e_activate_rq(&trap->rq); mlx5e_trigger_napi_sched(&trap->napi); } @@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv) struct mlx5e_trap *trap = priv->en_trap; mlx5e_deactivate_rq(&trap->rq); - napi_disable(&trap->napi); + napi_disable_locked(&trap->napi); } static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv) @@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; + netdev_lock(priv->netdev); switch (trap_ctx->action) { case DEVLINK_TRAP_ACTION_TRAP: err = mlx5e_handle_action_trap(priv, trap_ctx->id); @@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ trap_ctx->action); err = -EINVAL; } + netdev_unlock(priv->netdev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 2dd842aac6fc..77f61cd28a79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -259,8 +259,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); - struct xfrm_state *x = sa_entry->x; - struct net_device *netdev; + struct net_device *netdev = sa_entry->dev; struct neighbour *n; u8 addr[ETH_ALEN]; const void *pkey; @@ -270,8 +269,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->type != XFRM_DEV_OFFLOAD_PACKET) return; - netdev = x->xso.real_dev; - mlx5_query_mac_address(mdev, addr); switch (attrs->dir) { case XFRM_DEV_OFFLOAD_IN: @@ -692,17 +689,17 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry) return 0; } -static int mlx5e_xfrm_add_state(struct xfrm_state *x, +static int mlx5e_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; - struct net_device *netdev = x->xso.real_dev; struct mlx5e_ipsec *ipsec; struct mlx5e_priv *priv; gfp_t gfp; int err; - priv = netdev_priv(netdev); + priv = netdev_priv(dev); if (!priv->ipsec) return -EOPNOTSUPP; @@ -713,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, return -ENOMEM; sa_entry->x = x; + sa_entry->dev = dev; sa_entry->ipsec = ipsec; /* Check if this SA is originated from acquire flow temporary SA */ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) @@ -809,7 +807,7 @@ err_xfrm: return err; } -static void mlx5e_xfrm_del_state(struct xfrm_state *x) +static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec *ipsec = sa_entry->ipsec; @@ -822,7 +820,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) WARN_ON(old != sa_entry); } -static void mlx5e_xfrm_free_state(struct xfrm_state *x) +static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec *ipsec = sa_entry->ipsec; @@ -855,8 +853,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, struct mlx5e_ipsec_sa_entry *sa_entry; struct mlx5e_ipsec *ipsec; struct neighbour *n = ptr; - struct net_device *netdev; - struct xfrm_state *x; unsigned long idx; if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID)) @@ 
-876,11 +872,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, continue; } - x = sa_entry->x; - netdev = x->xso.real_dev; data = sa_entry->work->data; - neigh_ha_snapshot(data->addr, n, netdev); + neigh_ha_snapshot(data->addr, n, sa_entry->dev); queue_work(ipsec->wq, &sa_entry->work->work); } @@ -996,8 +990,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) size_t headers; lockdep_assert(lockdep_is_held(&x->lock) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock)); + lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) || + lockdep_is_held(&net->xfrm.xfrm_state_lock)); if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) return; @@ -1170,7 +1164,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xdo.real_dev; + struct net_device *netdev = x->xdo.dev; struct mlx5e_ipsec_pol_entry *pol_entry; struct mlx5e_priv *priv; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index a63c2289f8af..ffcd0cdeb775 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -274,6 +274,7 @@ struct mlx5e_ipsec_limits { struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_esn_state esn_state; struct xfrm_state *x; + struct net_device *dev; struct mlx5e_ipsec *ipsec; struct mlx5_accel_esp_xfrm_attrs attrs; void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 8705cffc747f..5fe016e477b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1147,6 +1147,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) bool reset = true; int err; + netdev_lock(priv->netdev); mutex_lock(&priv->state_lock); new_params = priv->channels.params; @@ -1162,6 +1163,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) &trust_state, reset); mutex_unlock(&priv->state_lock); + netdev_unlock(priv->netdev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index e399d7a3d6cb..ea078c9f5d15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -2060,14 +2060,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, if (err) return err; - dev_hold(dev); - rtnl_unlock(); - err = mlx5_firmware_flash(mdev, fw, NULL); release_firmware(fw); - rtnl_lock(); - dev_put(dev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 05058710d2c7..04a969128161 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs, } /* Need to fix some features.. 
*/ + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); return err; } @@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs, } else if (be16_to_cpu(proto) == ETH_P_8021AD) { clear_bit(vid, fs->vlan->active_svlans); mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9bd166f489e7..ea822c69d137 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,6 +39,7 @@ #include <linux/debugfs.h> #include <linux/if_bridge.h> #include <linux/filter.h> +#include <net/netdev_lock.h> #include <net/netdev_queues.h> #include <net/page_pool/types.h> #include <net/pkt_sched.h> @@ -1903,7 +1904,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, recover_work); + /* Recovering queues means re-enabling NAPI, which requires the netdev + * instance lock. However, SQ closing flows have to wait for work tasks + * to finish while also holding the netdev instance lock. So either get + * the lock or find that the SQ is no longer enabled and thus this work + * is not relevant anymore. + */ + while (!netdev_trylock(sq->netdev)) { + if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)) + return; + msleep(20); + } + mlx5e_reporter_tx_err_cqe(sq); + netdev_unlock(sq->netdev); } static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) @@ -2705,8 +2719,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->aff_mask = irq_get_effective_affinity_mask(irq); c->lag_port = mlx5e_enumerate_lag_port(mdev, ix); - netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix); - netif_napi_set_irq(&c->napi, irq); + netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix); + netif_napi_set_irq_locked(&c->napi, irq); err = mlx5e_open_queues(c, params, cparam); if (unlikely(err)) @@ -2728,7 +2742,7 @@ err_close_queues: mlx5e_close_queues(c); err_napi_del: - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); err_free: kvfree(cparam); @@ -2741,7 +2755,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) { int tc; - napi_enable(&c->napi); + napi_enable_locked(&c->napi); for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); @@ -2773,7 +2787,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) mlx5e_deactivate_txqsq(&c->sq[tc]); mlx5e_qos_deactivate_queues(c); - napi_disable(&c->napi); + napi_disable_locked(&c->napi); } static void mlx5e_close_channel(struct mlx5e_channel *c) @@ -2782,7 +2796,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_xsk(c); mlx5e_close_queues(c); mlx5e_qos_close_queues(c); - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); kvfree(c); } @@ -4276,7 +4290,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) if (!netdev->netdev_ops->ndo_bpf || params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { - xdp_clear_features_flag(netdev); + xdp_set_features_flag_locked(netdev, 0); return; } @@ -4285,7 +4299,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG; - xdp_set_features_flag(netdev, val); + xdp_set_features_flag_locked(netdev, val); } int mlx5e_set_features(struct net_device 
*netdev, netdev_features_t features) @@ -4968,21 +4982,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) struct net_device *netdev = priv->netdev; int i; - /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues - * through this flow. However, channel closing flows have to wait for - * this work to finish while holding rtnl lock too. So either get the - * lock or find that channels are being closed for other reason and - * this work is not relevant anymore. + /* Recovering the TX queues implies re-enabling NAPI, which requires + * the netdev instance lock. + * However, channel closing flows have to wait for this work to finish + * while holding the same lock. So either get the lock or find that + * channels are being closed for other reason and this work is not + * relevant anymore. */ - while (!rtnl_trylock()) { + while (!netdev_trylock(netdev)) { if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)) return; msleep(20); } - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; - for (i = 0; i < netdev->real_num_tx_queues; i++) { struct netdev_queue *dev_queue = netdev_get_tx_queue(netdev, i); @@ -4996,8 +5008,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) break; } -unlock: - rtnl_unlock(); + netdev_unlock(netdev); } static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -5321,7 +5332,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i, struct mlx5e_rq_stats *xskrq_stats; struct mlx5e_rq_stats *rq_stats; - ASSERT_RTNL(); if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch) return; @@ -5341,7 +5351,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_sq_stats *sq_stats; - ASSERT_RTNL(); if (!priv->stats_nch) return; @@ -5362,7 +5371,6 @@ static void mlx5e_get_base_stats(struct net_device *dev, struct mlx5e_ptp *ptp_channel; int i, tc; - ASSERT_RTNL(); if (!mlx5e_is_uplink_rep(priv)) { rx->packets = 0; rx->bytes = 0; @@ -5458,6 +5466,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->netdev_ops = &mlx5e_netdev_ops; netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops; netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); mlx5e_dcbnl_build_netdev(netdev); @@ -5839,9 +5849,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_nic_set_rx_mode(priv); rtnl_lock(); + netdev_lock(netdev); if (netif_running(netdev)) mlx5e_open(netdev); udp_tunnel_nic_reset_ntf(priv->netdev); + netdev_unlock(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5854,9 +5866,16 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_dcbnl_delete_app(priv); rtnl_lock(); + netdev_lock(priv->netdev); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); netif_device_detach(priv->netdev); + if (priv->en_trap) { + mlx5e_deactivate_trap(priv); + mlx5e_close_trap(priv->en_trap); + priv->en_trap = NULL; + } + netdev_unlock(priv->netdev); rtnl_unlock(); mlx5e_nic_set_rx_mode(priv); @@ -5866,11 +5885,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_monitor_counter_cleanup(priv); mlx5e_disable_blocking_events(priv); - if (priv->en_trap) { - mlx5e_deactivate_trap(priv); - mlx5e_close_trap(priv->en_trap); - priv->en_trap = NULL; - } mlx5e_disable_async_events(priv); mlx5_lag_remove_netdev(mdev, priv->netdev); mlx5_vxlan_reset_to_default(mdev->vxlan); @@ -6125,7 +6139,9 @@ static void mlx5e_update_features(struct 
net_device *netdev) return; /* features will be updated on netdev registration */ rtnl_lock(); + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); rtnl_unlock(); } @@ -6136,7 +6152,7 @@ static void mlx5e_reset_channels(struct net_device *netdev) int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED; + const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED; const struct mlx5e_profile *profile = priv->profile; int max_nch; int err; @@ -6178,15 +6194,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) * 2. Set our default XPS cpumask. * 3. Build the RQT. * - * rtnl_lock is required by netif_set_real_num_*_queues in case the + * Locking is required by netif_set_real_num_*_queues in case the * netdev has been registered by this point (if this function was called * in the reload or resume flow). */ - if (take_rtnl) + if (need_lock) { rtnl_lock(); + netdev_lock(priv->netdev); + } err = mlx5e_num_channels_changed(priv); - if (take_rtnl) + if (need_lock) { + netdev_unlock(priv->netdev); rtnl_unlock(); + } if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2abab241f03b..719aa16bd404 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -33,6 +33,7 @@ #include <linux/dim.h> #include <linux/debugfs.h> #include <linux/mlx5/fs.h> +#include <net/netdev_lock.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/act_api.h> @@ -885,6 +886,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, { SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops_rep; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); eth_hw_addr_random(netdev); netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; @@ -1344,9 +1347,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) netdev->wanted_features |= NETIF_F_HW_TC; rtnl_lock(); + netdev_lock(netdev); if (netif_running(netdev)) mlx5e_open(netdev); udp_tunnel_nic_reset_ntf(priv->netdev); + netdev_unlock(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -1356,9 +1361,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; rtnl_lock(); + netdev_lock(priv->netdev); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); netif_device_detach(priv->netdev); + netdev_unlock(priv->netdev); rtnl_unlock(); mlx5e_rep_bridge_cleanup(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a47c29571f64..1af76da8b132 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -527,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, struct mlx5_flow_rule *dst; void *in_flow_context, *vlan; void *in_match_value; - int reformat_id = 0; + u32 reformat_id = 0; unsigned int inlen; int dst_cnt_size; u32 *in, action; @@ -580,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, action, action); if (!extended_dest && fte->act_dests.action.pkt_reformat) { - struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat; - - if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { - reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); - if (reformat_id < 0) { - mlx5_core_err(dev, - 
"Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n", - pkt_reformat->reformat_type); - err = reformat_id; - goto err_out; - } - } else { - reformat_id = fte->act_dests.action.pkt_reformat->id; + struct mlx5_pkt_reformat *pkt_reformat = + fte->act_dests.action.pkt_reformat; + + err = mlx5_fs_get_packet_reformat_id(pkt_reformat, + &reformat_id); + if (err) { + mlx5_core_err(dev, + "Unsupported pkt_reformat type (%d)\n", + pkt_reformat->reformat_type); + goto err_out; } } - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + reformat_id); if (fte->act_dests.action.modify_hdr) { if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6163bc98d94a..23a7e8e7adfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1830,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, return err; } +int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *id) +{ + switch (pkt_reformat->owner) { + case MLX5_FLOW_RESOURCE_OWNER_FW: + *id = pkt_reformat->id; + return 0; + case MLX5_FLOW_RESOURCE_OWNER_SW: + return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id); + case MLX5_FLOW_RESOURCE_OWNER_HWS: + return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id); + default: + return -EINVAL; + } +} + static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1, struct mlx5_pkt_reformat *p2) { - return p1->owner == p2->owner && - (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ? - p1->id == p2->id : - mlx5_fs_dr_action_get_pkt_reformat_id(p1) == - mlx5_fs_dr_action_get_pkt_reformat_id(p2)); + int err1, err2; + u32 id1, id2; + + if (p1->owner != p2->owner) + return false; + + err1 = mlx5_fs_get_packet_reformat_id(p1, &id1); + err2 = mlx5_fs_get_packet_reformat_id(p2, &id2); + + return !err1 && !err2 && id1 == id2; } static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 0767239f651c..500826229b0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -58,6 +58,7 @@ struct mlx5_flow_definer { enum mlx5_flow_resource_owner { MLX5_FLOW_RESOURCE_OWNER_FW, MLX5_FLOW_RESOURCE_OWNER_SW, + MLX5_FLOW_RESOURCE_OWNER_HWS, }; struct mlx5_modify_hdr { @@ -386,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace struct mlx5_flow_root_namespace *find_root(struct fs_node *node); +int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *id); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 0979d672d47f..79ae3a51a4b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -32,6 +32,7 @@ #include <rdma/ib_verbs.h> #include <linux/mlx5/fs.h> +#include <net/netdev_lock.h> #include "en.h" #include "en/params.h" #include "ipoib.h" @@ -102,6 +103,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev) netdev->netdev_ops = 
&mlx5i_netdev_ops; netdev->ethtool_ops = &mlx5i_ethtool_ops; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c index bef4d25c1a2a..fb62f3bc4bd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c @@ -72,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action) return action->type; } +struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action) +{ + return action->ctx->mdev; +} + static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, enum mlx5hws_context_shared_stc_type stc_type, u8 tbl_type) @@ -1185,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, struct mlx5hws_action_mh_pattern *pattern, u32 log_bulk_size) { + u16 num_actions, max_mh_actions = 0, hw_max_actions; struct mlx5hws_context *ctx = action->ctx; - u16 num_actions, max_mh_actions = 0; int i, ret, size_in_bytes; u32 pat_id, arg_id = 0; __be64 *new_pattern; size_t pat_max_sz; pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE; + hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE; size_in_bytes = pat_max_sz * sizeof(__be64); new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL); if (!new_pattern) @@ -1202,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, for (i = 0; i < num_of_patterns; i++) { size_t new_num_actions; size_t cur_num_actions; - u32 nope_location; + u32 nop_locations; cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; - mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions, - pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE, - &new_num_actions, &nope_location, - &new_pattern[i * pat_max_sz]); + ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions, + hw_max_actions, &new_num_actions, + &nop_locations, + &new_pattern[i * pat_max_sz]); + if (ret) { + mlx5hws_err(ctx, "Too many actions after nop insertion\n"); + goto free_new_pat; + } - action[i].modify_header.nope_locations = nope_location; + action[i].modify_header.nop_locations = nop_locations; action[i].modify_header.num_of_actions = new_num_actions; max_mh_actions = max(max_mh_actions, new_num_actions); @@ -1258,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, MLX5_GET(set_action_in, pattern[i].data, action_type); } else { /* Multiple modify actions require a pattern */ - if (unlikely(action[i].modify_header.nope_locations)) { + if (unlikely(action[i].modify_header.nop_locations)) { size_t pattern_sz; pattern_sz = action[i].modify_header.num_of_actions * @@ -2100,21 +2110,23 @@ static void hws_action_modify_write(struct mlx5hws_send_engine *queue, u32 arg_idx, u8 *arg_data, u16 num_of_actions, - u32 nope_locations) + u32 nop_locations) { u8 *new_arg_data = NULL; int i, j; - if (unlikely(nope_locations)) { + if (unlikely(nop_locations)) { new_arg_data = kcalloc(num_of_actions, MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL); if (unlikely(!new_arg_data)) return; - for (i = 0, j = 0; i < num_of_actions; i++, j++) { - memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE); - if (BIT(i) & nope_locations) + for (i = 0, j = 0; j < num_of_actions; i++, j++) { + if (BIT(i) & nop_locations) j++; + memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE], + &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE], + 
MLX5HWS_MODIFY_ACTION_SIZE); } } @@ -2210,6 +2222,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, struct mlx5hws_action *action; u32 arg_sz, arg_idx; u8 *single_action; + u8 max_actions; __be32 stc_idx; rule_action = &apply->rule_action[setter->idx_double]; @@ -2237,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data); - } else { - /* Argument offset multiple with number of args per these actions */ - arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions); - arg_idx = rule_action->modify_header.offset * arg_sz; - - apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); - - if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { - apply->require_dep = 1; - hws_action_modify_write(apply->queue, - action->modify_header.arg_id + arg_idx, - rule_action->modify_header.data, - action->modify_header.num_of_actions, - action->modify_header.nope_locations); - } + return; + } + + /* Argument offset multiple with number of args per these actions */ + max_actions = action->modify_header.max_num_of_actions; + arg_sz = mlx5hws_arg_get_arg_size(max_actions); + arg_idx = rule_action->modify_header.offset * arg_sz; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); + + if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { + apply->require_dep = 1; + hws_action_modify_write(apply->queue, + action->modify_header.arg_id + arg_idx, + rule_action->modify_header.data, + action->modify_header.num_of_actions, + action->modify_header.nop_locations); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h index 25fa0d4c9221..55a079fdd08f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h @@ -136,7 +136,7 @@ struct mlx5hws_action { u32 pat_id; u32 arg_id; __be64 single_action; - u32 nope_locations; + u32 nop_locations; u8 num_of_patterns; u8 single_action_type; u8 num_of_actions; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c index 1b787cd66e6f..9d1c0e4b224a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c @@ -1081,13 +1081,8 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5hws_bwc_rule *rule; int err = 0; - if (mlx5_fs_cmd_is_fw_term_table(ft)) { - /* Packet reformat on terminamtion table not supported yet */ - if (fte->act_dests.action.action & - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) - return -EOPNOTSUPP; + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); - } err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions); if (err) @@ -1362,7 +1357,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, pkt_reformat->fs_hws_action.pr_data = pr_data; } - pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS; pkt_reformat->fs_hws_action.hws_action = hws_action; return 0; @@ -1380,6 +1375,15 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace struct mlx5_fs_hws_pr *pr_data; struct mlx5_fs_pool *pr_pool; + if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) { 
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 }; + + fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id; + mlx5_fs_cmd_get_fw_cmds()-> + packet_reformat_dealloc(ns, &fw_pkt_reformat); + pkt_reformat->fs_hws_action.fw_reformat_id = 0; + } + if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) return; @@ -1499,6 +1503,7 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns, err = -ENOMEM; goto release_mh; } + mutex_init(&modify_hdr->fs_hws_action.lock); modify_hdr->fs_hws_action.mh_data = mh_data; modify_hdr->fs_hws_action.fs_pool = pool; modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; @@ -1532,6 +1537,58 @@ static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * modify_hdr->fs_hws_action.mh_data = NULL; } +int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) +{ + enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type; + struct mutex *lock = &pkt_reformat->fs_hws_action.lock; + u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id; + struct mlx5_pkt_reformat fw_pkt_reformat = { 0 }; + struct mlx5_pkt_reformat_params params = { 0 }; + struct mlx5_flow_root_namespace *ns; + struct mlx5_core_dev *dev; + int ret; + + mutex_lock(lock); + + if (*id != 0) { + *reformat_id = *id; + ret = 0; + goto unlock; + } + + dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action); + if (!dev) { + ret = -EINVAL; + goto unlock; + } + + ns = mlx5_get_root_namespace(dev, ns_type); + if (!ns) { + ret = -EINVAL; + goto unlock; + } + + params.type = pkt_reformat->reformat_type; + params.size = pkt_reformat->fs_hws_action.pr_data->data_size; + params.data = pkt_reformat->fs_hws_action.pr_data->data; + + ret = mlx5_fs_cmd_get_fw_cmds()-> + packet_reformat_alloc(ns, ¶ms, ns_type, &fw_pkt_reformat); + if (ret) + goto unlock; + + *id = fw_pkt_reformat.id; + *reformat_id = *id; + ret = 0; + +unlock: + mutex_unlock(lock); + + return ret; +} + static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns, u16 format_id, u32 *match_mask) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h index 8b56298288da..b92d55b2d147 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h @@ -41,6 +41,11 @@ struct mlx5_fs_hws_action { struct mlx5_fs_pool *fs_pool; struct mlx5_fs_hws_pr *pr_data; struct mlx5_fs_hws_mh *mh_data; + u32 fw_reformat_id; + /* Protect `fw_reformat_id` against being initialized from multiple + * threads. 
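Editor's note: mlx5_fs_hws_action_get_pkt_reformat_id() above allocates a FW-owned reformat object on first use, caches its id in fw_reformat_id and serializes that initialization with the newly added mutex; the dealloc path releases the cached object when the id is non-zero. A hedged userspace sketch of the allocate-once-under-a-lock caching shape, with pthreads standing in for the kernel mutex and fake_fw_alloc() replacing the firmware command.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct cached_id {
	pthread_mutex_t lock;	/* protects fw_id initialization */
	uint32_t fw_id;		/* 0 means "not allocated yet" */
};

/* Stand-in for the firmware allocation call; the real code issues a
 * packet_reformat_alloc() command and may fail.
 */
static int fake_fw_alloc(uint32_t *id)
{
	static uint32_t next = 1;

	*id = next++;
	return 0;
}

static int get_or_alloc_id(struct cached_id *c, uint32_t *out)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	if (c->fw_id == 0)		/* first caller allocates */
		ret = fake_fw_alloc(&c->fw_id);
	if (!ret)
		*out = c->fw_id;
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	struct cached_id c = { .lock = PTHREAD_MUTEX_INITIALIZER, .fw_id = 0 };
	uint32_t id;

	if (!get_or_alloc_id(&c, &id))
		printf("first call:  id %u\n", id);
	if (!get_or_alloc_id(&c, &id))
		printf("second call: id %u (cached)\n", id);
	return 0;
}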
+ */ + struct mutex lock; }; struct mlx5_fs_hws_matcher { @@ -84,12 +89,23 @@ void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data); #ifdef CONFIG_MLX5_HW_STEERING +int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id); + bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void); #else +static inline int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) +{ + return -EOPNOTSUPP; +} + static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev) { return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index fbd63369da10..9bbadc4d8a0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -504,6 +504,15 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action); /** + * mlx5hws_action_get_dev - Get mlx5 core device. + * + * @action: The action to get the device from. + * + * Return: mlx5 core device. + */ +struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action); + +/** * mlx5hws_action_create_dest_drop - Create a direct rule drop action. * * @ctx: The context in which the new action will be created. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c index f51ed24526b9..51e4c551e0ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c @@ -490,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern, switch (action_type) { case MLX5_ACTION_TYPE_SET: case MLX5_ACTION_TYPE_ADD: - *src_field = MLX5_GET(set_action_in, pattern, field); - *dst_field = INVALID_FIELD; + *src_field = INVALID_FIELD; + *dst_field = MLX5_GET(set_action_in, pattern, field); break; case MLX5_ACTION_TYPE_COPY: *src_field = MLX5_GET(copy_action_in, pattern, src_field); @@ -522,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s return true; } -void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, - size_t max_actions, size_t *new_size, - u32 *nope_location, __be64 *new_pat) +int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, + size_t max_actions, size_t *new_size, + u32 *nop_locations, __be64 *new_pat) { - u16 prev_src_field = 0, prev_dst_field = 0; + u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD; u16 src_field, dst_field; u8 action_type; + bool dependent; size_t i, j; *new_size = num_actions; - *nope_location = 0; + *nop_locations = 0; if (num_actions == 1) - return; + return 0; for (i = 0, j = 0; i < num_actions; i++, j++) { - action_type = MLX5_GET(set_action_in, &pattern[i], action_type); + if (j >= max_actions) + return -EINVAL; + action_type = MLX5_GET(set_action_in, &pattern[i], action_type); hws_action_modify_get_target_fields(action_type, &pattern[i], &src_field, &dst_field); - if (i % 2) { - if (action_type == MLX5_ACTION_TYPE_COPY && - (prev_src_field == src_field || - prev_dst_field == dst_field)) { - /* need Nope */ - *new_size += 1; - *nope_location |= BIT(i); - memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE); - MLX5_SET(set_action_in, &new_pat[j], - action_type, - MLX5_MODIFICATION_TYPE_NOP); - j++; - } else if 
(prev_src_field == src_field) { - /* need Nope*/ - *new_size += 1; - *nope_location |= BIT(i); - MLX5_SET(set_action_in, &new_pat[j], - action_type, - MLX5_MODIFICATION_TYPE_NOP); - j++; - } - } - memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE); - /* check if no more space */ - if (j > max_actions) { - *new_size = num_actions; - *nope_location = 0; - return; + + /* For every action, look at it and the previous one. The two + * actions are dependent if: + */ + dependent = + (i > 0) && + /* At least one of the actions is a write and */ + (dst_field != INVALID_FIELD || + prev_dst_field != INVALID_FIELD) && + /* One reads from the other's source */ + (dst_field == prev_src_field || + src_field == prev_dst_field || + /* Or both write to the same destination */ + dst_field == prev_dst_field); + + if (dependent) { + *new_size += 1; + *nop_locations |= BIT(i); + memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE); + MLX5_SET(set_action_in, &new_pat[j], action_type, + MLX5_MODIFICATION_TYPE_NOP); + j++; + if (j >= max_actions) + return -EINVAL; } + memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE); prev_src_field = src_field; prev_dst_field = dst_field; } + + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h index 8ddb51980044..7fbd8dc7aa18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h @@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, u8 *arg_data, size_t data_size); -void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions, - size_t *new_size, u32 *nope_location, __be64 *new_pat); +int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, + size_t max_actions, size_t *new_size, + u32 *nop_locations, __be64 *new_pat); #endif /* MLX5HWS_PAT_ARG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c index 8007d3f523c9..f367997ab61e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c @@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, return steering_caps; } -int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) { + struct mlx5dr_action *dr_action; + switch (pkt_reformat->reformat_type) { case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: case MLX5_REFORMAT_TYPE_L2_TO_NVGRE: case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: case MLX5_REFORMAT_TYPE_INSERT_HDR: - return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action); + dr_action = pkt_reformat->fs_dr_action.dr_action; + *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action); + return 0; } return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h index 99a3b2eff6b8..f869f2daefbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h @@ -38,7 +38,9 @@ struct mlx5_fs_dr_table { bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev); -int mlx5_fs_dr_action_get_pkt_reformat_id(struct 
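Editor's note: mlx5hws_pat_calc_nop() above now declares two back-to-back modify-header actions dependent when at least one of them writes and either reads the other's destination or both write the same field, inserting a NOP between them and returning -EINVAL if the padded pattern exceeds the hardware limit. A reduced sketch of just the dependency test over (src, dst) field pairs; struct mh_action and the INVALID_FIELD value are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_FIELD 0xFFFF	/* assumed sentinel: "no field on that side" */

struct mh_action {
	uint16_t src;	/* field read by the action, or INVALID_FIELD */
	uint16_t dst;	/* field written by the action, or INVALID_FIELD */
};

/* Two consecutive actions need a NOP between them if at least one of
 * them writes and either reads what the other writes, or both write
 * the same field.
 */
static bool needs_nop(const struct mh_action *prev, const struct mh_action *cur)
{
	if (prev->dst == INVALID_FIELD && cur->dst == INVALID_FIELD)
		return false;	/* neither action writes anything */

	return cur->dst == prev->src ||
	       cur->src == prev->dst ||
	       cur->dst == prev->dst;
}

/* Count how many NOPs a pattern would need after padding. */
static unsigned int count_nops(const struct mh_action *acts, unsigned int n)
{
	unsigned int i, nops = 0;

	for (i = 1; i < n; i++)
		if (needs_nop(&acts[i - 1], &acts[i]))
			nops++;
	return nops;
}

int main(void)
{
	/* set F1; copy F1 -> F2; set F3: only the first pair conflicts */
	struct mh_action acts[] = {
		{ .src = INVALID_FIELD, .dst = 1 },
		{ .src = 1,             .dst = 2 },
		{ .src = INVALID_FIELD, .dst = 3 },
	};

	printf("NOPs needed: %u\n", count_nops(acts, 3));
	return 0;
}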
mlx5_pkt_reformat *pkt_reformat); +int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void); @@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) return NULL; } -static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +static inline int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) { - return 0; + return -EOPNOTSUPP; } static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c index 71d9461a0d1b..4c4938eedd7b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -166,11 +166,10 @@ fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component) struct fbnic_fw_completion *cmpl; int err; - cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); if (!cmpl) return -ENOMEM; - fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl, component->identifier, component->component_size); @@ -237,11 +236,10 @@ fbnic_flash_component(struct pldmfw *context, * Setup completions for write before issuing the start message so * the driver can catch both messages. */ - cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ); if (!cmpl) return -ENOMEM; - fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ); err = fbnic_mbx_set_cmpl(fbd, cmpl); if (err) goto cmpl_free; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 6a803a59dc25..e2368075ab8c 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -1232,12 +1232,19 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, fw_version, str_sz); } -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl, - u32 msg_type) +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type) { - fw_cmpl->msg_type = msg_type; - init_completion(&fw_cmpl->done); - kref_init(&fw_cmpl->ref_count); + struct fbnic_fw_completion *cmpl; + + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) + return NULL; + + cmpl->msg_type = msg_type; + init_completion(&cmpl->done); + kref_init(&cmpl->ref_count); + + return cmpl; } void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index 6baac10fd688..08bc4b918de7 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -80,8 +80,7 @@ int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd, int cancel_error); int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, struct fbnic_fw_completion *cmpl_data); -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data, - u32 msg_type); +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type); void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, struct fbnic_fw_completion *cmpl_data); void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 4ba6f8d10775..10e108c1fcd0 100644 --- 
a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -687,13 +687,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, int err = 0, retries = 5; s32 *sensor; - fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL); + fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP); if (!fw_cmpl) return -ENOMEM; - /* Initialize completion and queue it for FW to process */ - fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP); - switch (id) { case FBNIC_SENSOR_TEMP: sensor = &fw_cmpl->u.tsene.millidegrees; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 880681085df2..7e71579632f3 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -3495,6 +3495,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, struct pci_dev *pdev) { struct lan743x_tx *tx; + u32 sgmii_ctl; int index; int ret; @@ -3507,6 +3508,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); pci11x1x_set_rfe_rd_fifo_threshold(adapter); + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + if (adapter->is_sgmii_en) { + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + } else { + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + } + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; @@ -3558,7 +3568,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { - u32 sgmii_ctl; int ret; adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -3570,10 +3579,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) adapter->mdiobus->priv = (void *)adapter; if (adapter->is_pci11x1x) { if (adapter->is_sgmii_en) { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); adapter->mdiobus->read = lan743x_mdiobus_read_c22; @@ -3584,10 +3589,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); } else { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index 671af5d4c5d2..9e7c285eaa6b 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len) } } -static int nfp_net_xfrm_add_state(struct xfrm_state *x, +static int nfp_net_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.real_dev; struct nfp_ipsec_cfg_mssg msg = {}; int i, key_len, trunc_len, err = 0; struct nfp_ipsec_cfg_add_sa *cfg; struct nfp_net *nn; 
unsigned int saidx; - nn = netdev_priv(netdev); + nn = netdev_priv(dev); cfg = &msg.cfg_add_sa; /* General */ @@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, return 0; } -static void nfp_net_xfrm_del_state(struct xfrm_state *x) +static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { struct nfp_ipsec_cfg_mssg msg = { .cmd = NFP_IPSEC_CFG_MSSG_INV_SA, .sa_idx = x->xso.offload_handle - 1, }; - struct net_device *netdev = x->xso.real_dev; struct nfp_net *nn; int err; - nn = netdev_priv(netdev); + nn = netdev_priv(dev); err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg, sizeof(msg), nfp_net_ipsec_cfg); if (err) diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 0efe7668e498..4d37217e9a14 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1983,7 +1983,7 @@ static u16 rtase_calc_time_mitigation(u32 time_us) u8 msb, time_count, time_unit; u16 int_miti; - time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); + time_us = min(time_us, RTASE_MITI_MAX_TIME); if (time_us > RTASE_MITI_TIME_COUNT_MASK) { msb = fls(time_us); @@ -2005,7 +2005,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num) u8 msb, pkt_num_count, pkt_num_unit; u16 int_miti; - pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM); + pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM); if (pkt_num > 60) { pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index fd6518e252e3..2796dc426943 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY * address. No need to mask it again. */ - reg |= 1 << H3_EPHY_ADDR_SHIFT; + reg |= ret << H3_EPHY_ADDR_SHIFT; } else { /* For SoCs without internal PHY the PHY selection bit should be * set to 0 (external PHY). 
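The two rtase hunks above are subtle: min_t(int, ...) forces both operands to int, so a large u32 such as time_us can become negative and slip past the clamp, then convert back to a huge value on assignment, whereas min() compares in the common unsigned domain and complains at build time if the operand types disagree. A throwaway user-space demonstration of the hazard, using simplified stand-ins for the kernel macros (not the real definitions, which add type checking and temporaries):

#include <stdio.h>

#define min_t(type, x, y)	((type)(x) < (type)(y) ? (type)(x) : (type)(y))
#define min_u32(x, y)		((x) < (y) ? (x) : (y))

int main(void)
{
	unsigned int time_us = 0x80000010u;	/* larger than INT_MAX */
	unsigned int max = 4096;

	/* the cast makes time_us negative (on a typical 32-bit int),
	 * so the "clamp" returns the unclamped value
	 */
	printf("min_t(int, ...) = %d\n", min_t(int, time_us, max));
	/* comparing as unsigned clamps as intended */
	printf("unsigned min    = %u\n", min_u32(time_us, max));
	return 0;
}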
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 988ce9119306..f20d1ff192ef 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2687,7 +2687,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { eth_random_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + dev_info(dev, "Use random MAC address\n"); } } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 43019ec9329c..c12a4cb951f6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -219,7 +219,7 @@ int wx_nway_reset(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_nway_reset(wx->phylink); @@ -231,9 +231,6 @@ int wx_get_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) - return -EOPNOTSUPP; - return phylink_ethtool_ksettings_get(wx->phylink, cmd); } EXPORT_SYMBOL(wx_get_link_ksettings); @@ -243,7 +240,7 @@ int wx_set_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_ksettings_set(wx->phylink, cmd); @@ -255,7 +252,7 @@ void wx_get_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return; phylink_ethtool_get_pauseparam(wx->phylink, pause); @@ -267,7 +264,7 @@ int wx_set_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_set_pauseparam(wx->phylink, pause); @@ -345,6 +342,7 @@ int wx_set_coalesce(struct net_device *netdev, max_eitr = WX_SP_MAX_EITR; break; case wx_mac_aml: + case wx_mac_aml40: max_eitr = WX_AML_MAX_EITR; break; default: @@ -375,6 +373,7 @@ int wx_set_coalesce(struct net_device *netdev, switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: tx_itr_param = WX_12K_ITR; break; default: @@ -413,15 +412,10 @@ static unsigned int wx_max_channels(struct wx *wx) max_combined = 1; } else { /* support up to max allowed queues with RSS */ - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) max_combined = 63; - break; - default: + else max_combined = 8; - break; - } } return max_combined; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 143cc1088eea..c6744a363e97 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -113,15 +113,10 @@ static void wx_intr_disable(struct wx *wx, u64 qmask) if (mask) wr32(wx, WX_PX_IMS(0), mask); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMS(1), mask); - break; - default: - break; } } @@ -133,15 +128,10 @@ void wx_intr_enable(struct wx *wx, u64 qmask) if (mask) wr32(wx, WX_PX_IMC(0), mask); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if 
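The wangxun conversion that runs through wx_ethtool.c, wx_hw.c, wx_lib.c and wx_sriov.c replaces repeated switches on wx->mac.type with a single capability bit, so adding the new aml40 MAC does not require touching every call site. A condensed before/after sketch of that shape, based on the wx_intr_disable() hunk (simplified, not a literal copy):

/* before: every call site enumerates the MAC types with 64 functions */
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
	wr32(wx, WX_PX_IMS(1), qmask >> 32);
	break;
default:
	break;
}

/* after: the capability is declared once, in the txgbe probe path ... */
set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);

/* ... and call sites only test the capability */
if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
	wr32(wx, WX_PX_IMS(1), qmask >> 32);

Other libwx users that never set the flag keep the narrow 8-function path by default, which is the point of the refactor.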
(test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMC(1), mask); - break; - default: - break; } } EXPORT_SYMBOL(wx_intr_enable); @@ -705,6 +695,7 @@ void wx_init_eeprom_params(struct wx *wx) switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { wx_err(wx, "NVM Read Error\n"); return; @@ -774,14 +765,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, /* setup VMDq pool mapping */ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); - break; - default: - break; - } /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -919,14 +904,9 @@ void wx_init_rx_addrs(struct wx *wx) wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* clear VMDq pool/queue selection for RAR 0 */ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); - break; - default: - break; } } @@ -1512,7 +1492,7 @@ static void wx_configure_virtualization(struct wx *wx) wr32m(wx, WX_PSR_VM_L2CTL(pool), WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE); - if (wx->mac.type == wx_mac_em) { + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { vf_shift = BIT(VMDQ_P(0)); /* Enable only the PF pools for Tx/Rx */ wr32(wx, WX_RDM_VF_RE(0), vf_shift); @@ -1543,7 +1523,7 @@ static void wx_configure_port(struct wx *wx) { u32 value, i; - if (wx->mac.type == wx_mac_em) { + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { value = (wx->num_vfs == 0) ? 
WX_CFG_PORT_CTL_NUM_VT_NONE : WX_CFG_PORT_CTL_NUM_VT_8; @@ -2074,7 +2054,7 @@ static void wx_setup_psrtype(struct wx *wx) WX_RDB_PL_CFG_TUN_OUTL2HDR | WX_RDB_PL_CFG_TUN_TUNHDR; - if (wx->mac.type == wx_mac_em) { + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { for_each_set_bit(pool, &wx->fwd_bitmask, 8) wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); } else { @@ -2272,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx) } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx *wx) +void wx_reset_mac(struct wx *wx) { - int i; - /* receive packets that size > 2048 */ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); @@ -2287,6 +2265,14 @@ void wx_reset_misc(struct wx *wx) WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); +} +EXPORT_SYMBOL(wx_reset_mac); + +void wx_reset_misc(struct wx *wx) +{ + int i; + + wx_reset_mac(wx); wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 91c1d6135045..26a56cba60b9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -42,6 +42,7 @@ void wx_configure(struct wx *wx); void wx_start_hw(struct wx *wx); int wx_disable_pcie_master(struct wx *wx); int wx_stop_adapter(struct wx *wx); +void wx_reset_mac(struct wx *wx); void wx_reset_misc(struct wx *wx); int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2a808afeb414..5c747509d56b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -5,6 +5,7 @@ #include <net/ip6_checksum.h> #include <net/page_pool/helpers.h> #include <net/inet_ecn.h> +#include <linux/workqueue.h> #include <linux/iopoll.h> #include <linux/sctp.h> #include <linux/pci.h> @@ -1633,7 +1634,7 @@ static bool wx_set_vmdq_queues(struct wx *wx) /* Add starting offset to total pool count */ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset; - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* double check we are limited to maximum pools */ vmdq_i = min_t(u16, 64, vmdq_i); @@ -1693,7 +1694,7 @@ static void wx_set_rss_queues(struct wx *wx) /* set mask for 16 queue limit of RSS */ f = &wx->ring_feature[RING_F_RSS]; - if (wx->mac.type == wx_mac_sp) + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) f->mask = WX_RSS_64Q_MASK; else f->mask = WX_RSS_8Q_MASK; @@ -1853,7 +1854,7 @@ static bool wx_cache_ring_vmdq(struct wx *wx) if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) return false; - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* start at VMDq register offset for SR-IOV enabled setups */ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) { @@ -1959,6 +1960,7 @@ static int wx_alloc_q_vector(struct wx *wx, switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: default_itr = WX_12K_ITR; break; default: @@ -2327,6 +2329,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg = q_vector->itr & WX_SP_MAX_EITR; break; case wx_mac_aml: + case wx_mac_aml40: itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR; break; default: @@ -2354,10 +2357,10 @@ void wx_configure_vectors(struct wx *wx) if (pdev->msix_enabled) { /* Populate MSIX to EITR Select */ - if (wx->mac.type 
== wx_mac_sp) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { if (wx->num_vfs >= 32) eitrsel = BIT(wx->num_vfs % 32) - 1; - } else if (wx->mac.type == wx_mac_em) { + } else { for (i = 0; i < wx->num_vfs; i++) eitrsel |= BIT(i); } @@ -3093,5 +3096,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count, } EXPORT_SYMBOL(wx_set_ring); +void wx_service_event_schedule(struct wx *wx) +{ + if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state)) + queue_work(system_power_efficient_wq, &wx->service_task); +} +EXPORT_SYMBOL(wx_service_event_schedule); + +void wx_service_event_complete(struct wx *wx) +{ + if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state))) + return; + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} +EXPORT_SYMBOL(wx_service_event_complete); + +void wx_service_timer(struct timer_list *t) +{ + struct wx *wx = from_timer(wx, t, service_timer); + unsigned long next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&wx->service_timer, next_event_offset + jiffies); + + wx_service_event_schedule(wx); +} +EXPORT_SYMBOL(wx_service_timer); + MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index 49f8bde36ddb..aed6ea8cf0d6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -38,5 +38,8 @@ netdev_features_t wx_features_check(struct sk_buff *skb, netdev_features_t features); void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring); +void wx_service_event_schedule(struct wx *wx); +void wx_service_event_complete(struct wx *wx); +void wx_service_timer(struct timer_list *t); #endif /* _WX_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c index 07c015ba338f..2c39b879f977 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c @@ -15,12 +15,14 @@ #define WX_INCVAL_100 0xA00000 #define WX_INCVAL_10 0xC7F380 #define WX_INCVAL_EM 0x2000000 +#define WX_INCVAL_AML 0xA00000 #define WX_INCVAL_SHIFT_10GB 20 #define WX_INCVAL_SHIFT_1GB 18 #define WX_INCVAL_SHIFT_100 15 #define WX_INCVAL_SHIFT_10 12 #define WX_INCVAL_SHIFT_EM 22 +#define WX_INCVAL_SHIFT_AML 21 #define WX_OVERFLOW_PERIOD (HZ * 30) #define WX_PTP_TX_TIMEOUT (HZ) @@ -504,15 +506,27 @@ static long wx_ptp_create_clock(struct wx *wx) wx->ptp_caps.gettimex64 = wx_ptp_gettimex64; wx->ptp_caps.settime64 = wx_ptp_settime64; wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work; - if (wx->mac.type == wx_mac_em) { - wx->ptp_caps.max_adj = 500000000; + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + wx->ptp_caps.max_adj = 250000000; wx->ptp_caps.n_per_out = 1; wx->ptp_setup_sdp = wx_ptp_setup_sdp; wx->ptp_caps.enable = wx_ptp_feature_enable; - } else { + break; + case wx_mac_sp: wx->ptp_caps.max_adj = 250000000; wx->ptp_caps.n_per_out = 0; wx->ptp_setup_sdp = NULL; + break; + case wx_mac_em: + wx->ptp_caps.max_adj = 500000000; + wx->ptp_caps.n_per_out = 1; + wx->ptp_setup_sdp = wx_ptp_setup_sdp; + wx->ptp_caps.enable = wx_ptp_feature_enable; + break; + default: + return -EOPNOTSUPP; } wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev); @@ -647,10 +661,18 @@ static u64 wx_ptp_read(const struct cyclecounter *hw_cc) static void wx_ptp_link_speed_adjust(struct 
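The new AML timestamping constants appear to follow the usual increment-register scheme, where each hardware clock tick adds incval to SYSTIME and shift is the number of fractional bits, so one tick is worth incval / 2^shift nanoseconds. Plugging in the values added here, 0xA00000 >> 21 is exactly 5 ns per tick, which would correspond to a 200 MHz time reference; the pre-existing EM values (0x2000000, shift 22) work out to 8 ns and 125 MHz. A throwaway check of that arithmetic:

#include <stdio.h>

int main(void)
{
	struct { const char *name; unsigned long incval; unsigned int shift; } tab[] = {
		{ "AML", 0xA00000,  21 },	/* added in this series */
		{ "EM",  0x2000000, 22 },	/* pre-existing, for comparison */
	};

	for (unsigned int i = 0; i < 2; i++) {
		double ns = (double)tab[i].incval / (1UL << tab[i].shift);
		printf("%s: %.1f ns/tick (~%.0f MHz)\n",
		       tab[i].name, ns, 1000.0 / ns);
	}
	return 0;
}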
wx *wx, u32 *shift, u32 *incval) { - if (wx->mac.type == wx_mac_em) { + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + *shift = WX_INCVAL_SHIFT_AML; + *incval = WX_INCVAL_AML; + return; + case wx_mac_em: *shift = WX_INCVAL_SHIFT_EM; *incval = WX_INCVAL_EM; return; + default: + break; } switch (wx->speed) { diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c index 52e6a6faf715..e8656d9d733b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c @@ -76,7 +76,7 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) u32 value = 0; set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags); - wx_err(wx, "SR-IOV enabled with %d VFs\n", num_vfs); + dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs); /* Enable VMDq flag so device will be set in VM mode */ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); @@ -106,7 +106,7 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE; } - if (wx->mac.type == wx_mac_em) { + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { value = WX_CFG_PORT_CTL_NUM_VT_8; } else { if (num_vfs < 32) @@ -599,10 +599,10 @@ static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf) if (VMDQ_P(0) < 32) { bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); bits &= ~BIT(VMDQ_P(0)); - if (wx->mac.type != wx_mac_em) + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); } else { - if (wx->mac.type != wx_mac_em) + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); bits &= ~BIT(VMDQ_P(0) % 32); bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); @@ -848,7 +848,7 @@ void wx_disable_vf_rx_tx(struct wx *wx) { wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX); wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX); - if (wx->mac.type != wx_mac_em) { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX); wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 8cfed5d4ebf7..7730c9fc3e02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -838,13 +838,14 @@ enum wx_mac_type { wx_mac_sp, wx_mac_em, wx_mac_aml, + wx_mac_aml40, }; -enum sp_media_type { - sp_media_unknown = 0, - sp_media_fiber, - sp_media_copper, - sp_media_backplane +enum wx_media_type { + wx_media_unknown = 0, + wx_media_fiber, + wx_media_copper, + wx_media_backplane }; enum em_mac_type { @@ -1153,6 +1154,7 @@ enum wx_state { WX_STATE_SWFW_BUSY, WX_STATE_PTP_RUNNING, WX_STATE_PTP_TX_IN_PROGRESS, + WX_STATE_SERVICE_SCHED, WX_STATE_NBITS /* must be last */ }; @@ -1184,6 +1186,7 @@ struct vf_macvlans { }; enum wx_pf_flags { + WX_FLAG_MULTI_64_FUNC, WX_FLAG_SWFW_RING, WX_FLAG_VMDQ_ENABLED, WX_FLAG_VLAN_PROMISC, @@ -1195,6 +1198,8 @@ enum wx_pf_flags { WX_FLAG_RX_HWTSTAMP_ENABLED, WX_FLAG_RX_HWTSTAMP_IN_REGISTER, WX_FLAG_PTP_PPS_ENABLED, + WX_FLAG_NEED_LINK_CONFIG, + WX_FLAG_NEED_SFP_RESET, WX_PF_FLAGS_NBITS /* must be last */ }; @@ -1211,7 +1216,7 @@ struct wx { struct wx_mbx_info mbx; struct wx_mac_info mac; enum em_mac_type mac_type; - enum sp_media_type media_type; + enum wx_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; struct wx_fc_info fc; @@ -1233,6 +1238,8 @@ struct wx { /* PHY stuff */ bool notify_down; + int adv_speed; + int adv_duplex; unsigned int link; int speed; int duplex; @@ -1330,6 
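Several of the SR-IOV hunks manipulate a 64-pool membership bitmap that the hardware exposes as a low/high pair of 32-bit registers, where the high register only exists on parts with the MULTI_64_FUNC capability. The indexing rule is simply "bit n lives in register n / 32 at position n % 32"; the helper below spells that out for the VLAN switch membership pair (wx_vlan_pool_set() is a made-up name, not a driver function):

/* Illustrative only: set membership bit @pool in WX_PSR_VLAN_SWC_VM_{L,H}. */
static void wx_vlan_pool_set(struct wx *wx, unsigned int pool)
{
	u32 reg = pool < 32 ? WX_PSR_VLAN_SWC_VM_L : WX_PSR_VLAN_SWC_VM_H;
	u32 bits;

	/* parts without WX_FLAG_MULTI_64_FUNC only have the low register */
	if (pool >= 32 && !test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
		return;

	bits = rd32(wx, reg);
	bits |= BIT(pool % 32);
	wr32(wx, reg, bits);
}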
+1337,9 @@ struct wx { struct ptp_clock_info ptp_caps; struct kernel_hwtstamp_config tstamp_config; struct sk_buff *ptp_tx_skb; + + struct timer_list service_timer; + struct work_struct service_task; }; #define WX_INTR_ALL (~0ULL) diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index f74576fe7062..c757fa95e58e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \ txgbe_phy.o \ txgbe_irq.o \ txgbe_fdir.o \ - txgbe_ethtool.o + txgbe_ethtool.o \ + txgbe_aml.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 000000000000..7dbcf41750c1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/phylink.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" +#include "txgbe_type.h" +#include "txgbe_aml.h" +#include "txgbe_hw.h" + +void txgbe_gpio_init_aml(struct wx *wx) +{ + u32 status; + + wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + + status = rd32(wx, WX_GPIO_INTSTATUS); + for (int i = 0; i < 6; i++) { + if (status & BIT(i)) + wr32(wx, WX_GPIO_EOI, BIT(i)); + } +} + +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data) +{ + struct txgbe *txgbe = data; + struct wx *wx = txgbe->wx; + u32 status; + + wr32(wx, WX_GPIO_INTMASK, 0xFF); + status = rd32(wx, WX_GPIO_INTSTATUS); + if (status & TXGBE_GPIOBIT_2) { + set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2); + wx_service_event_schedule(wx); + } + if (status & TXGBE_GPIOBIT_3) { + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + wx_service_event_schedule(wx); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3); + } + + wr32(wx, WX_GPIO_INTMASK, 0); + return IRQ_HANDLED; +} + +int txgbe_test_hostif(struct wx *wx) +{ + struct txgbe_hic_ephy_getlink buffer; + + if (wx->mac.type != wx_mac_aml) + return 0; + + buffer.hdr.cmd = FW_PHY_GET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer) +{ + buffer->hdr.cmd = FW_READ_SFP_INFO_CMD; + buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) - + sizeof(struct wx_hic_hdr); + buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)buffer, + sizeof(struct txgbe_hic_i2c_read), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex) +{ + struct txgbe_hic_ephy_setlink buffer; + + buffer.hdr.cmd = FW_PHY_SET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + switch (speed) { + case SPEED_25000: + buffer.speed = TXGBE_LINK_SPEED_25GB_FULL; + break; + case SPEED_10000: + buffer.speed = TXGBE_LINK_SPEED_10GB_FULL; + 
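Every new hostif helper in txgbe_aml.c frames its mailbox message the same way; the one non-obvious field is buf_len, which counts only the payload behind the shared wx_hic_hdr. Restating txgbe_test_hostif() with that spelled out (the 12-byte figure comes from the txgbe_hic_ephy_getlink layout added in txgbe_type.h and assumes the all-u8 payload introduces no padding):

	struct txgbe_hic_ephy_getlink msg = {};

	msg.hdr.cmd = FW_PHY_GET_LINK_CMD;
	/* payload length only, i.e. everything after the shared header:
	 * six u8 status fields plus resv[6], 12 bytes for this message
	 */
	msg.hdr.buf_len = sizeof(msg) - sizeof(msg.hdr);
	msg.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;

	return wx_host_interface_command(wx, (u32 *)&msg, sizeof(msg),
					 WX_HI_COMMAND_TIMEOUT, true);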
break; + } + + buffer.fec_mode = TXGBE_PHY_FEC_AUTO; + buffer.autoneg = autoneg; + buffer.duplex = duplex; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static void txgbe_get_link_capabilities(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_25000; + else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_10000; + else + wx->adv_speed = SPEED_UNKNOWN; + + wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ? + DUPLEX_HALF : DUPLEX_FULL; +} + +static void txgbe_get_phy_link(struct wx *wx, int *speed) +{ + u32 status; + + status = rd32(wx, TXGBE_CFG_PORT_ST); + if (!(status & TXGBE_CFG_PORT_ST_LINK_UP)) + *speed = SPEED_UNKNOWN; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G) + *speed = SPEED_25000; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G) + *speed = SPEED_10000; + else + *speed = SPEED_UNKNOWN; +} + +int txgbe_set_phy_link(struct wx *wx) +{ + int speed, err; + u32 gpio; + + /* Check RX signal */ + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_3) + return -ENODEV; + + txgbe_get_link_capabilities(wx); + if (wx->adv_speed == SPEED_UNKNOWN) + return -ENODEV; + + txgbe_get_phy_link(wx, &speed); + if (speed == wx->adv_speed) + return 0; + + err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex); + if (err) { + wx_err(wx, "Failed to setup link\n"); + return err; + } + + return 0; +} + +static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; + DECLARE_PHY_INTERFACE_MASK(interfaces); + struct txgbe *txgbe = wx->priv; + + if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE | + TXGBE_SFF_25GBASEER_CAPABLE | + TXGBE_SFF_25GBASELR_CAPABLE)) { + phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) { + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) { + phylink_set(modes, 10000baseLR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if (phy_interface_empty(interfaces)) { + wx_err(wx, "unsupported SFP module\n"); + return -EINVAL; + } + + phylink_set(modes, Pause); + phylink_set(modes, Asym_Pause); + phylink_set(modes, FIBRE); + txgbe->link_port = PORT_FIBRE; + + if (!linkmode_equal(txgbe->sfp_support, modes)) { + linkmode_copy(txgbe->sfp_support, modes); + phy_interface_and(txgbe->sfp_interfaces, + wx->phylink_config.supported_interfaces, + interfaces); + linkmode_copy(txgbe->advertising, modes); + + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + } + + return 0; +} + +int txgbe_identify_sfp(struct wx *wx) +{ + struct txgbe_hic_i2c_read buffer; + struct txgbe_sfp_id *id; + int err = 0; + u32 gpio; + + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_2) + return -ENODEV; + + err = txgbe_identify_sfp_hostif(wx, &buffer); + if (err) { + wx_err(wx, "Failed to identify SFP module\n"); + return err; + } + + id = &buffer.id; + if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) { + wx_err(wx, "Invalid SFP module\n"); + return -ENODEV; + } + + err = txgbe_sfp_to_linkmodes(wx, id); + if (err) + return err; + + if (gpio & TXGBE_GPIOBIT_3) + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + + return 0; +} + +void txgbe_setup_link(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + 
phy_interface_zero(txgbe->sfp_interfaces); + linkmode_zero(txgbe->sfp_support); + + txgbe_identify_sfp(wx); +} + +static void txgbe_get_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct wx *wx = phylink_to_wx(config); + int speed; + + txgbe_get_phy_link(wx, &speed); + state->link = speed != SPEED_UNKNOWN; + state->speed = speed; + state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN; +} + +static void txgbe_reconfig_mac(struct wx *wx) +{ + u32 wdg, fc; + + wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); + fc = rd32(wx, WX_MAC_RX_FLOW_CTRL); + + wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func)); + /* wait for MAC reset complete */ + usleep_range(1000, 1500); + + wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD, + TXGBE_MAC_MISC_CTL_LINK_BOTH); + wx_reset_mac(wx); + + wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); + wr32(wx, WX_MAC_RX_FLOW_CTRL, fc); +} + +static void txgbe_mac_link_up_aml(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); + u32 txcfg; + + wx_fc_enable(wx, tx_pause, rx_pause); + + txgbe_reconfig_mac(wx); + + txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG); + txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; + + switch (speed) { + case SPEED_25000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + break; + case SPEED_10000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G; + break; + default: + break; + } + + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); +} + +static void txgbe_mac_link_down_aml(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = phylink_to_wx(config); + + wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); +} + +static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static const struct phylink_mac_ops txgbe_mac_ops_aml = { + .mac_config = txgbe_mac_config_aml, + .mac_link_down = txgbe_mac_link_down_aml, + .mac_link_up = txgbe_mac_link_up_aml, +}; + +int txgbe_phylink_init_aml(struct txgbe *txgbe) +{ + struct phylink_link_state state; + struct phylink_config *config; + struct wx *wx = txgbe->wx; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_25000FD | MAC_10000FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->get_fixed_state = txgbe_get_link_state; + + phy_mode = PHY_INTERFACE_MODE_25GBASER; + __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + + phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + state.speed = SPEED_25000; + state.duplex = 
DUPLEX_FULL; + err = phylink_set_fixed_link(phylink, &state); + if (err) { + wx_err(wx, "Failed to set fixed link\n"); + return err; + } + + wx->phylink = phylink; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 000000000000..25d4971ca0d9 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +void txgbe_gpio_init_aml(struct wx *wx); +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data); +int txgbe_test_hostif(struct wx *wx); +int txgbe_set_phy_link(struct wx *wx); +int txgbe_identify_sfp(struct wx *wx); +void txgbe_setup_link(struct wx *wx); +int txgbe_phylink_init_aml(struct txgbe *txgbe); + +#endif /* _TXGBE_AML_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 78999d484f18..fa770961df5f 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -12,6 +12,31 @@ #include "txgbe_fdir.h" #include "txgbe_ethtool.h" +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + struct txgbe *txgbe = wx->priv; + int err; + + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + + err = wx_get_link_ksettings(netdev, cmd); + if (err) + return err; + + if (wx->mac.type == wx_mac_sp) + return 0; + + cmd->base.port = txgbe->link_port; + cmd->base.autoneg = AUTONEG_DISABLE; + linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support); + linkmode_copy(cmd->link_modes.advertising, txgbe->advertising); + + return 0; +} + static int txgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -510,7 +535,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = wx_get_link_ksettings, + .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = wx_set_link_ksettings, .get_sset_count = wx_get_sset_count, .get_strings = wx_get_strings, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h index ace1b3571012..66dbc8ec1bb6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h @@ -4,6 +4,8 @@ #ifndef _TXGBE_ETHTOOL_H_ #define _TXGBE_ETHTOOL_H_ +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); void txgbe_set_ethtool_ops(struct net_device *netdev); #endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index a054b259d435..e551ae0e2069 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -188,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx) if (status != 0) return status; - if (wx->media_type != sp_media_copper) { + if (wx->media_type != wx_media_copper) { u32 val; val = WX_MIS_RST_LAN_RST(wx->bus.func); @@ -218,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx) * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; wx_init_rx_addrs(wx); pci_set_master(wx->pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 19878f02d956..dfc3a2cc27f6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -6,11 +6,13 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" #include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_phy.h" #include "txgbe_irq.h" +#include "txgbe_aml.h" /** * txgbe_irq_enable - Enable default interrupt generation settings @@ -19,7 +21,14 @@ **/ void txgbe_irq_enable(struct wx *wx, bool queues) { - wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); + u32 misc_ien = TXGBE_PX_MISC_IEN_MASK; + + if (wx->mac.type == wx_mac_aml) { + misc_ien |= TXGBE_PX_MISC_GPIO; + txgbe_gpio_init_aml(wx); + } + + wr32(wx, WX_PX_MISC_IEN, misc_ien); /* unmask interrupt */ wx_intr_enable(wx, TXGBE_INTR_MISC); @@ -81,6 +90,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe) IRQF_ONESHOT, "txgbe-link-irq", txgbe); } +static int txgbe_request_gpio_irq(struct txgbe *txgbe) +{ + txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); + return request_threaded_irq(txgbe->gpio_irq, NULL, + txgbe_gpio_irq_handler_aml, + IRQF_ONESHOT, "txgbe-gpio-irq", txgbe); +} + static const struct irq_chip txgbe_irq_chip = { .name = "txgbe-misc-irq", }; @@ -157,6 +174,15 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) handle_nested_irq(sub_irq); nhandled++; } + if (eicr & TXGBE_PX_MISC_GPIO) { + sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); + handle_nested_irq(sub_irq); + nhandled++; + } + if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) { + wx_ptp_check_pps_event(wx); + nhandled++; + } wx_intr_enable(wx, TXGBE_INTR_MISC); return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); @@ -176,9 +202,12 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe) void txgbe_free_misc_irq(struct txgbe *txgbe) { - if (txgbe->wx->mac.type == wx_mac_aml) + if (txgbe->wx->mac.type == wx_mac_aml40) return; + if (txgbe->wx->mac.type == wx_mac_aml) + free_irq(txgbe->gpio_irq, txgbe); + free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); @@ -190,7 +219,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int hwirq, err; - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) goto skip_sp_irq; txgbe->misc.nirqs = TXGBE_IRQ_MAX; @@ -222,11 +251,20 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) if (err) goto free_msic_irq; + if (wx->mac.type == wx_mac_sp) + goto skip_sp_irq; + + err = txgbe_request_gpio_irq(txgbe); + if (err) + goto free_link_irq; + skip_sp_irq: wx->misc_irq_domain = true; return 0; +free_link_irq: + free_irq(txgbe->link_irq, txgbe); free_msic_irq: free_irq(txgbe->misc.irq, txgbe); del_misc_irq: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index f57d84628e07..f3d2778b8e35 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -21,6 +21,7 @@ #include "txgbe_type.h" #include "txgbe_hw.h" #include "txgbe_phy.h" +#include "txgbe_aml.h" #include "txgbe_irq.h" #include "txgbe_fdir.h" #include "txgbe_ethtool.h" @@ -88,9 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx) return physfns; } +static void txgbe_sfp_detection_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags)) + return; + + /* wait for SFP module ready */ + msleep(200); + + err = txgbe_identify_sfp(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); +} + +static void txgbe_link_config_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags)) + return; + + err = txgbe_set_phy_link(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct wx *wx = container_of(work, struct wx, service_task); + + txgbe_sfp_detection_subtask(wx); + txgbe_link_config_subtask(wx); + + wx_service_event_complete(wx); +} + +static void txgbe_init_service(struct wx *wx) +{ + timer_setup(&wx->service_timer, wx_service_timer, 0); + INIT_WORK(&wx->service_task, txgbe_service_task); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} + static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; + u32 reg; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -99,17 +153,26 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - if (wx->mac.type == wx_mac_aml) { - u32 reg; - + switch (wx->mac.type) { + case wx_mac_aml40: reg = rd32(wx, TXGBE_AML_MAC_TX_CFG); reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; - reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G; wr32(wx, WX_MAC_TX_CFG, reg); txgbe_enable_sec_tx_path(wx); netif_carrier_on(wx->netdev); - } else { + break; + case wx_mac_aml: + /* Enable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0); + txgbe_setup_link(wx); phylink_start(wx->phylink); + break; + case wx_mac_sp: + phylink_start(wx->phylink); + break; + default: + break; } /* clear any pending 
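Note the retry semantics built into the service subtasks that follow: the WX_FLAG_NEED_* bit is cleared only after the operation succeeds, so a failed SFP read or link setup is simply retried on the next two-second service-timer tick (per wx_service_timer()) or the next GPIO interrupt. Reduced to its skeleton (example_subtask() is illustrative, mirroring txgbe_link_config_subtask()):

static void example_subtask(struct wx *wx)
{
	if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
		return;				/* nothing requested */

	if (txgbe_set_phy_link(wx))
		return;				/* leave the bit set; retry later */

	clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
}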
interrupts, may auto mask */ @@ -120,6 +183,7 @@ static void txgbe_up_complete(struct wx *wx) /* enable transmits */ netif_tx_start_all_queues(netdev); + mod_timer(&wx->service_timer, jiffies); /* Set PF Reset Done bit so PF/VF Mail Ops can work */ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD, @@ -168,6 +232,8 @@ static void txgbe_disable_device(struct wx *wx) wx_irq_disable(wx); wx_napi_disable_all(wx); + timer_delete_sync(&wx->service_timer); + if (wx->bus.func < 2) wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); else @@ -207,10 +273,22 @@ void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); - if (wx->mac.type == wx_mac_aml) + + switch (wx->mac.type) { + case wx_mac_aml40: netif_carrier_off(wx->netdev); - else + break; + case wx_mac_aml: phylink_stop(wx->phylink); + /* Disable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1); + break; + case wx_mac_sp: + phylink_stop(wx->phylink); + break; + default: + break; + } wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -240,9 +318,11 @@ static void txgbe_init_type_code(struct wx *wx) case TXGBE_DEV_ID_AML5110: case TXGBE_DEV_ID_AML5025: case TXGBE_DEV_ID_AML5125: + wx->mac.type = wx_mac_aml; + break; case TXGBE_DEV_ID_AML5040: case TXGBE_DEV_ID_AML5140: - wx->mac.type = wx_mac_aml; + wx->mac.type = wx_mac_aml40; break; default: wx->mac.type = wx_mac_unknown; @@ -251,25 +331,25 @@ static void txgbe_init_type_code(struct wx *wx) switch (device_type) { case TXGBE_ID_SFP: - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; break; case TXGBE_ID_XAUI: case TXGBE_ID_SGMII: - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; case TXGBE_ID_KR_KX_KX4: case TXGBE_ID_MAC_XAUI: case TXGBE_ID_MAC_SGMII: - wx->media_type = sp_media_backplane; + wx->media_type = wx_media_backplane; break; case TXGBE_ID_SFI_XAUI: if (wx->bus.func == 0) - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; else - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; default: - wx->media_type = sp_media_unknown; + wx->media_type = wx_media_unknown; break; } } @@ -283,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx) u16 msix_count = 0; int err; - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; - wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_MAX_TXQ; + wx->mac.max_rx_queues = TXGBE_MAX_RXQ; + wx->mac.mcft_size = TXGBE_MC_TBL_SIZE; + wx->mac.vft_size = TXGBE_VFT_TBL_SIZE; + wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE; + wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ; /* PCI config space info */ err = wx_sw_init(wx); @@ -318,6 +398,7 @@ static int txgbe_sw_init(struct wx *wx) wx->configure_fdir = txgbe_configure_fdir; set_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags); /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; @@ -340,6 +421,7 @@ static int txgbe_sw_init(struct wx *wx) case wx_mac_sp: break; case wx_mac_aml: + case wx_mac_aml40: set_bit(WX_FLAG_SWFW_RING, wx->flags); wx->swfw_index = 0; break; @@ -735,9 +817,11 @@ static int txgbe_probe(struct pci_dev *pdev, eth_hw_addr_set(netdev, wx->mac.perm_addr); wx_mac_set_default_filter(wx, wx->mac.perm_addr); + 
txgbe_init_service(wx); + err = wx_init_interrupt_scheme(wx); if (err) - goto err_free_mac_table; + goto err_cancel_service; /* Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom @@ -780,6 +864,13 @@ static int txgbe_probe(struct pci_dev *pdev, if (etrack_id < 0x20010) dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + err = txgbe_test_hostif(wx); + if (err != 0) { + dev_err(&pdev->dev, "Mismatched Firmware version\n"); + err = -EIO; + goto err_release_hw; + } + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); if (!txgbe) { err = -ENOMEM; @@ -830,6 +921,9 @@ err_free_misc_irq: err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); +err_cancel_service: + timer_delete_sync(&wx->service_timer); + cancel_work_sync(&wx->service_task); err_free_mac_table: kfree(wx->rss_key); kfree(wx->mac_table); @@ -856,6 +950,8 @@ static void txgbe_remove(struct pci_dev *pdev) struct txgbe *txgbe = wx->priv; struct net_device *netdev; + cancel_work_sync(&wx->service_task); + netdev = wx->netdev; wx_disable_sriov(wx); unregister_netdev(netdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 1863cfd27ee7..03f1b9bc604d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -20,6 +20,7 @@ #include "../libwx/wx_mbx.h" #include "../libwx/wx_hw.h" #include "txgbe_type.h" +#include "txgbe_aml.h" #include "txgbe_phy.h" #include "txgbe_hw.h" @@ -165,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi struct wx *wx = phylink_to_wx(config); struct txgbe *txgbe = wx->priv; - if (wx->media_type != sp_media_copper) + if (wx->media_type != wx_media_copper) return txgbe->pcs; return NULL; @@ -278,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - if (wx->media_type == sp_media_copper) { + if (wx->media_type == wx_media_copper) { phy_mode = PHY_INTERFACE_MODE_XAUI; __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); } else { @@ -318,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data) status = rd32(wx, TXGBE_CFG_PORT_ST); up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP); - phylink_pcs_change(txgbe->pcs, up); + if (txgbe->pcs) + phylink_pcs_change(txgbe->pcs, up); + else + phylink_mac_change(wx->phylink, up); return IRQ_HANDLED; } @@ -573,11 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int ret; - if (wx->mac.type == wx_mac_aml) + switch (wx->mac.type) { + case wx_mac_aml40: return 0; - - if (txgbe->wx->media_type == sp_media_copper) - return txgbe_ext_phy_init(txgbe); + case wx_mac_aml: + return txgbe_phylink_init_aml(txgbe); + case wx_mac_sp: + if (wx->media_type == wx_media_copper) + return txgbe_ext_phy_init(txgbe); + break; + default: + break; + } ret = txgbe_swnodes_register(txgbe); if (ret) { @@ -640,13 +651,21 @@ err_unregister_swnode: void txgbe_remove_phy(struct txgbe *txgbe) { - if (txgbe->wx->mac.type == wx_mac_aml) + switch (txgbe->wx->mac.type) { + case wx_mac_aml40: return; - - if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->wx->phylink); + case wx_mac_aml: phylink_destroy(txgbe->wx->phylink); return; + case wx_mac_sp: + if (txgbe->wx->media_type == wx_media_copper) { + phylink_disconnect_phy(txgbe->wx->phylink); + 
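Taken together, the service-task plumbing follows the usual ordering rules for a timer-driven work item: initialise it before anything can schedule it, arm it only once the device is up, and tear it down with the _sync variants so neither the timer nor the work can still be running when resources disappear. A condensed view of the call sites added by this patch (it mirrors the probe/up/down/remove hunks rather than adding anything new):

/* txgbe_init_service(), called from probe before interrupts are wired up */
timer_setup(&wx->service_timer, wx_service_timer, 0);
INIT_WORK(&wx->service_task, txgbe_service_task);

/* txgbe_up_complete(): start the two-second heartbeat */
mod_timer(&wx->service_timer, jiffies);

/* txgbe_disable_device(): stop the heartbeat before draining queues */
timer_delete_sync(&wx->service_timer);

/* probe error path and txgbe_remove(): nothing may still be queued */
cancel_work_sync(&wx->service_task);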
phylink_destroy(txgbe->wx->phylink); + return; + } + break; + default: + break; } platform_device_unregister(txgbe->sfp_dev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 261a83308568..7a00e3343be6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -6,6 +6,8 @@ #include <linux/property.h> #include <linux/irq.h> +#include <linux/phy.h> +#include "../libwx/wx_type.h" /* Device IDs */ #define TXGBE_DEV_ID_SP1000 0x1001 @@ -50,6 +52,8 @@ /**************** SP Registers ****************************/ /* chip control Registers */ +#define TXGBE_MIS_RST 0x1000C +#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3) #define TXGBE_MIS_PRB_CTL 0x10010 #define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i)) /* FMGR Registers */ @@ -62,6 +66,11 @@ #define TXGBE_TS_CTL 0x10300 #define TXGBE_TS_CTL_EVAL_MD BIT(31) +/* MAC Misc Registers */ +#define TXGBE_MAC_MISC_CTL 0x11F00 +#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0) +#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0) +#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1) /* GPIO register bit */ #define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */ #define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */ @@ -73,6 +82,7 @@ /* Extended Interrupt Enable Set */ #define TXGBE_PX_MISC_ETH_LKDN BIT(8) #define TXGBE_PX_MISC_DEV_RST BIT(10) +#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11) #define TXGBE_PX_MISC_ETH_EVENT BIT(17) #define TXGBE_PX_MISC_ETH_LK BIT(18) #define TXGBE_PX_MISC_ETH_AN BIT(19) @@ -83,11 +93,13 @@ (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ - TXGBE_PX_MISC_IC_VF_MBOX) + TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC) /* Port cfg registers */ #define TXGBE_CFG_PORT_ST 0x14404 #define TXGBE_CFG_PORT_ST_LINK_UP BIT(0) +#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3) +#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4) #define TXGBE_CFG_VXLAN 0x14410 #define TXGBE_CFG_VXLAN_GPE 0x14414 #define TXGBE_CFG_GENEVE 0x14418 @@ -151,8 +163,11 @@ /*************************** Amber Lite Registers ****************************/ #define TXGBE_PX_PF_BME 0x4B8 #define TXGBE_AML_MAC_TX_CFG 0x11000 +#define TXGBE_AML_MAC_TX_CFG_TE BIT(0) #define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27) -#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28) +#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0) +#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2) +#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8) #define TXGBE_RDM_RSC_CTL 0x1200C #define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7) @@ -173,13 +188,13 @@ #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 -#define TXGBE_SP_VFT_TBL_SIZE 128 -#define TXGBE_SP_RX_PB_SIZE 512 -#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_MAX_TXQ 128 +#define TXGBE_MAX_RXQ 128 +#define TXGBE_RAR_ENTRIES 128 +#define TXGBE_MC_TBL_SIZE 128 +#define TXGBE_VFT_TBL_SIZE 128 +#define TXGBE_RX_PB_SIZE 512 +#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ #define TXGBE_MAX_VFS_DRV_LIMIT 63 @@ -299,6 +314,72 @@ void txgbe_up(struct wx *wx); int txgbe_setup_tc(struct net_device *dev, u8 tc); void txgbe_do_reset(struct 
net_device *netdev); +#define TXGBE_LINK_SPEED_10GB_FULL 4 +#define TXGBE_LINK_SPEED_25GB_FULL 0x10 + +#define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4 +#define TXGBE_SFF_FCPI4_LIMITING 0x3 +#define TXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define TXGBE_SFF_25GBASESR_CAPABLE 0x2 +#define TXGBE_SFF_25GBASELR_CAPABLE 0x3 +#define TXGBE_SFF_25GBASEER_CAPABLE 0x4 +#define TXGBE_SFF_25GBASECR_91FEC 0xB +#define TXGBE_SFF_25GBASECR_74FEC 0xC +#define TXGBE_SFF_25GBASECR_NOFEC 0xD + +#define TXGBE_PHY_FEC_RS BIT(0) +#define TXGBE_PHY_FEC_BASER BIT(1) +#define TXGBE_PHY_FEC_OFF BIT(2) +#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \ + TXGBE_PHY_FEC_BASER |\ + TXGBE_PHY_FEC_RS) + +#define FW_PHY_GET_LINK_CMD 0xC0 +#define FW_PHY_SET_LINK_CMD 0xC1 +#define FW_READ_SFP_INFO_CMD 0xC5 + +struct txgbe_sfp_id { + u8 identifier; /* A0H 0x00 */ + u8 com_1g_code; /* A0H 0x06 */ + u8 com_10g_code; /* A0H 0x03 */ + u8 com_25g_code; /* A0H 0x24 */ + u8 cable_spec; /* A0H 0x3C */ + u8 cable_tech; /* A0H 0x08 */ + u8 vendor_oui0; /* A0H 0x25 */ + u8 vendor_oui1; /* A0H 0x26 */ + u8 vendor_oui2; /* A0H 0x27 */ + u8 reserved[3]; +}; + +struct txgbe_hic_i2c_read { + struct wx_hic_hdr hdr; + struct txgbe_sfp_id id; +}; + +struct txgbe_hic_ephy_setlink { + struct wx_hic_hdr hdr; + u8 speed; + u8 duplex; + u8 autoneg; + u8 fec_mode; + u8 resv[4]; +}; + +struct txgbe_hic_ephy_getlink { + struct wx_hic_hdr hdr; + u8 speed; + u8 duplex; + u8 autoneg; + u8 flow_ctl; + u8 power; + u8 fec_mode; + u8 resv[6]; +}; + #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ @@ -336,6 +417,7 @@ struct txgbe_nodes { enum txgbe_misc_irqs { TXGBE_IRQ_LINK = 0, + TXGBE_IRQ_GPIO, TXGBE_IRQ_MAX }; @@ -357,6 +439,7 @@ struct txgbe { struct clk *clk; struct gpio_chip *gpio; unsigned int link_irq; + unsigned int gpio_irq; u32 eicr; /* flow director */ @@ -364,6 +447,11 @@ struct txgbe { union txgbe_atr_input fdir_mask; int fdir_filter_count; spinlock_t fdir_perfect_lock; /* spinlock for FDIR */ + + DECLARE_PHY_INTERFACE_MASK(sfp_interfaces); + __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + u8 link_port; }; #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index d3219ca194ec..7db40aaa079d 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -3,21 +3,10 @@ # MDIO Layer Configuration # -menuconfig MDIO_DEVICE - tristate "MDIO bus device drivers" - help - MDIO devices and driver infrastructure code. - -if MDIO_DEVICE - config MDIO_BUS - tristate - default m if PHYLIB=m - default MDIO_DEVICE + tristate "MDIO bus consumer layer" help - This internal symbol is used for link time dependencies and it - reflects whether the mdio_bus/mdio_device code is built as a - loadable module or built-in. 
+ MDIO bus consumer layer if PHYLIB @@ -291,4 +280,3 @@ config MDIO_BUS_MUX_MMIOREG endif -endif diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 2f4fc664d2e1..98f667b121f7 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -458,7 +458,7 @@ int of_phy_register_fixed_link(struct device_node *np) return -ENODEV; register_phy: - return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np)); + return PTR_ERR_OR_ZERO(fixed_phy_register(&status, np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index d88bdb9a1717..47cdee5577d4 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -85,11 +85,11 @@ static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec) return -ENOSPC; } -static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, +static int nsim_ipsec_parse_proto_keys(struct net_device *dev, + struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -129,17 +129,16 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, return 0; } -static int nsim_ipsec_add_sa(struct xfrm_state *xs, +static int nsim_ipsec_add_sa(struct net_device *dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { struct nsim_ipsec *ipsec; - struct net_device *dev; struct netdevsim *ns; struct nsim_sa sa; u16 sa_idx; int ret; - dev = xs->xso.real_dev; ns = netdev_priv(dev); ipsec = &ns->ipsec; @@ -174,7 +173,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs, sa.crypt = xs->ealg || xs->aead; /* get the key and salt */ - ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt); + ret = nsim_ipsec_parse_proto_keys(dev, xs, sa.key, &sa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table"); return ret; @@ -200,9 +199,9 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs, return 0; } -static void nsim_ipsec_del_sa(struct xfrm_state *xs) +static void nsim_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.real_dev); + struct netdevsim *ns = netdev_priv(dev); struct nsim_ipsec *ipsec = &ns->ipsec; u16 sa_idx; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2aa999345fe1..af545d42961c 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -881,11 +881,13 @@ static void nsim_setup(struct net_device *dev) NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | + NETIF_F_LRO | NETIF_F_TSO; dev->hw_features |= NETIF_F_HW_TC | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | + NETIF_F_LRO | NETIF_F_TSO; dev->max_mtu = ETH_MAX_MTU; dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 677d56e06539..a95cecabfe1b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -14,7 +14,7 @@ config PHYLINK menuconfig PHYLIB tristate "PHY Device support and infrastructure" - select MDIO_DEVICE + select MDIO_BUS help Ethernet controllers are usually attached to PHY devices. This option provides infrastructure for @@ -76,6 +76,18 @@ config SFP comment "MII PHY device drivers" +config AS21XXX_PHY + tristate "Aeonsemi AS21xxx PHYs" + help + Currently supports the Aeonsemi AS21xxx PHY. + + These are C45 PHYs 10G that require all a generic firmware. 
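The netdevsim hunks above, like the nfp ones earlier in this series, are part of the same tree-wide change to the IPsec offload callbacks: the core now hands the offloading netdevice to the driver instead of each driver digging it out of xs->xso.real_dev. For a driver the conversion is mechanical; the sketch below shows the updated shape under that assumption, with everything except the xfrmdev_ops member names being illustrative.

static int example_xdo_state_add(struct net_device *dev, struct xfrm_state *xs,
				 struct netlink_ext_ack *extack)
{
	/* 'dev' replaces the old xs->xso.real_dev lookup; program the SA here */
	return 0;
}

static void example_xdo_state_delete(struct net_device *dev,
				     struct xfrm_state *xs)
{
	/* tear the SA down using 'dev' directly */
}

static const struct xfrmdev_ops example_xfrmdev_ops = {
	.xdo_dev_state_add	= example_xdo_state_add,
	.xdo_dev_state_delete	= example_xdo_state_delete,
};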
+ + Supported PHYs AS21011JB1, AS21011PB1, AS21010JB1, AS21010PB1, + AS21511JB1, AS21511PB1, AS21510JB1, AS21510PB1, AS21210JB1, + AS21210PB1 that all register with the PHY ID 0x7500 0x7500 + before the firmware is loaded. + config AIR_EN8811H_PHY tristate "Airoha EN8811H 2.5 Gigabit PHY" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 59ac3a9a3177..57933366fe54 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -6,27 +6,19 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ phy_package.o phy_caps.o mdio_bus_provider.o mdio-bus-y += mdio_bus.o mdio_device.o -ifdef CONFIG_MDIO_DEVICE -obj-y += mdio-boardinfo.o -endif - -# PHYLIB implies MDIO_DEVICE, in that case, we have a bunch of circular -# dependencies that does not make it possible to split mdio-bus objects into a -# dedicated loadable module, so we bundle them all together into libphy.ko ifdef CONFIG_PHYLIB -libphy-y += $(mdio-bus-y) -# the stubs are built-in whenever PHYLIB is built-in or module -obj-y += stubs.o -else -obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o +# built-in whenever PHYLIB is built-in or module +obj-y += stubs.o mdio-boardinfo.o endif -obj-$(CONFIG_PHYLIB) += mdio_devres.o + libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o +obj-$(CONFIG_MDIO_BUS) += mdio-bus.o obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o +obj-$(CONFIG_PHYLIB) += mdio_devres.o obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o @@ -40,6 +32,7 @@ obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia/ +obj-$(CONFIG_AS21XXX_PHY) += as21xxx.o ifdef CONFIG_AX88796B_RUST_PHY obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o else diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c new file mode 100644 index 000000000000..92697f43087d --- /dev/null +++ b/drivers/net/phy/as21xxx.c @@ -0,0 +1,1087 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Aeonsemi AS21XXxX PHY Driver + * + * Author: Christian Marangi <ansuelsmth@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy.h> + +#define VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR 0x3 +#define VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR 0x4 + +#define VEND1_GLB_REG_CPU_CTRL 0xe +#define VEND1_GLB_CPU_CTRL_MASK GENMASK(4, 0) +#define VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK GENMASK(12, 8) +#define VEND1_GLB_CPU_CTRL_LED_POLARITY(_n) FIELD_PREP(VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK, \ + BIT(_n)) + +#define VEND1_FW_START_ADDR 0x100 + +#define VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD 0x101 +#define VEND1_GLB_REG_MDIO_INDIRECT_LOAD 0x102 + +#define VEND1_GLB_REG_MDIO_INDIRECT_STATUS 0x103 + +#define VEND1_PTP_CLK 0x142 +#define VEND1_PTP_CLK_EN BIT(6) + +/* 5 LED at step of 0x20 + * FE: Fast-Ethernet (10/100) + * GE: Gigabit-Ethernet (1000) + * NG: New-Generation (2500/5000/10000) + */ +#define VEND1_LED_REG(_n) (0x1800 + ((_n) * 0x10)) +#define VEND1_LED_REG_A_EVENT GENMASK(15, 11) +#define VEND1_LED_CONF 0x1881 +#define VEND1_LED_CONFG_BLINK GENMASK(7, 0) + +#define VEND1_SPEED_STATUS 0x4002 +#define VEND1_SPEED_MASK GENMASK(7, 0) +#define VEND1_SPEED_10000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x3) +#define VEND1_SPEED_5000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x5) +#define VEND1_SPEED_2500 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x9) +#define 
VEND1_SPEED_1000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x10) +#define VEND1_SPEED_100 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x20) +#define VEND1_SPEED_10 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x0) + +#define VEND1_IPC_CMD 0x5801 +#define AEON_IPC_CMD_PARITY BIT(15) +#define AEON_IPC_CMD_SIZE GENMASK(10, 6) +#define AEON_IPC_CMD_OPCODE GENMASK(5, 0) + +#define IPC_CMD_NOOP 0x0 /* Do nothing */ +#define IPC_CMD_INFO 0x1 /* Get Firmware Version */ +#define IPC_CMD_SYS_CPU 0x2 /* SYS_CPU */ +#define IPC_CMD_BULK_DATA 0xa /* Pass bulk data in ipc registers. */ +#define IPC_CMD_BULK_WRITE 0xc /* Write bulk data to memory */ +#define IPC_CMD_CFG_PARAM 0x1a /* Write config parameters to memory */ +#define IPC_CMD_NG_TESTMODE 0x1b /* Set NG test mode and tone */ +#define IPC_CMD_TEMP_MON 0x15 /* Temperature monitoring function */ +#define IPC_CMD_SET_LED 0x23 /* Set led */ + +#define VEND1_IPC_STS 0x5802 +#define AEON_IPC_STS_PARITY BIT(15) +#define AEON_IPC_STS_SIZE GENMASK(14, 10) +#define AEON_IPC_STS_OPCODE GENMASK(9, 4) +#define AEON_IPC_STS_STATUS GENMASK(3, 0) +#define AEON_IPC_STS_STATUS_RCVD FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x1) +#define AEON_IPC_STS_STATUS_PROCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x2) +#define AEON_IPC_STS_STATUS_SUCCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x4) +#define AEON_IPC_STS_STATUS_ERROR FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x8) +#define AEON_IPC_STS_STATUS_BUSY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xe) +#define AEON_IPC_STS_STATUS_READY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xf) + +#define VEND1_IPC_DATA0 0x5808 +#define VEND1_IPC_DATA1 0x5809 +#define VEND1_IPC_DATA2 0x580a +#define VEND1_IPC_DATA3 0x580b +#define VEND1_IPC_DATA4 0x580c +#define VEND1_IPC_DATA5 0x580d +#define VEND1_IPC_DATA6 0x580e +#define VEND1_IPC_DATA7 0x580f +#define VEND1_IPC_DATA(_n) (VEND1_IPC_DATA0 + (_n)) + +/* Sub command of CMD_INFO */ +#define IPC_INFO_VERSION 0x1 + +/* Sub command of CMD_SYS_CPU */ +#define IPC_SYS_CPU_REBOOT 0x3 +#define IPC_SYS_CPU_IMAGE_OFST 0x4 +#define IPC_SYS_CPU_IMAGE_CHECK 0x5 +#define IPC_SYS_CPU_PHY_ENABLE 0x6 + +/* Sub command of CMD_CFG_PARAM */ +#define IPC_CFG_PARAM_DIRECT 0x4 + +/* CFG DIRECT sub command */ +#define IPC_CFG_PARAM_DIRECT_NG_PHYCTRL 0x1 +#define IPC_CFG_PARAM_DIRECT_CU_AN 0x2 +#define IPC_CFG_PARAM_DIRECT_SDS_PCS 0x3 +#define IPC_CFG_PARAM_DIRECT_AUTO_EEE 0x4 +#define IPC_CFG_PARAM_DIRECT_SDS_PMA 0x5 +#define IPC_CFG_PARAM_DIRECT_DPC_RA 0x6 +#define IPC_CFG_PARAM_DIRECT_DPC_PKT_CHK 0x7 +#define IPC_CFG_PARAM_DIRECT_DPC_SDS_WAIT_ETH 0x8 +#define IPC_CFG_PARAM_DIRECT_WDT 0x9 +#define IPC_CFG_PARAM_DIRECT_SDS_RESTART_AN 0x10 +#define IPC_CFG_PARAM_DIRECT_TEMP_MON 0x11 +#define IPC_CFG_PARAM_DIRECT_WOL 0x12 + +/* Sub command of CMD_TEMP_MON */ +#define IPC_CMD_TEMP_MON_GET 0x4 + +#define AS21XXX_MDIO_AN_C22 0xffe0 + +#define PHY_ID_AS21XXX 0x75009410 +/* AS21xxx ID Legend + * AS21x1xxB1 + * ^ ^^ + * | |J: Supports SyncE/PTP + * | |P: No SyncE/PTP support + * | 1: Supports 2nd Serdes + * | 2: Not 2nd Serdes support + * 0: 10G, 5G, 2.5G + * 5: 5G, 2.5G + * 2: 2.5G + */ +#define PHY_ID_AS21011JB1 0x75009402 +#define PHY_ID_AS21011PB1 0x75009412 +#define PHY_ID_AS21010JB1 0x75009422 +#define PHY_ID_AS21010PB1 0x75009432 +#define PHY_ID_AS21511JB1 0x75009442 +#define PHY_ID_AS21511PB1 0x75009452 +#define PHY_ID_AS21510JB1 0x75009462 +#define PHY_ID_AS21510PB1 0x75009472 +#define PHY_ID_AS21210JB1 0x75009482 +#define PHY_ID_AS21210PB1 0x75009492 +#define PHY_VENDOR_AEONSEMI 0x75009400 + +#define AEON_MAX_LEDS 5 +#define AEON_IPC_DELAY 
10000 +#define AEON_IPC_TIMEOUT (AEON_IPC_DELAY * 100) +#define AEON_IPC_DATA_NUM_REGISTERS 8 +#define AEON_IPC_DATA_MAX (AEON_IPC_DATA_NUM_REGISTERS * sizeof(u16)) + +#define AEON_BOOT_ADDR 0x1000 +#define AEON_CPU_BOOT_ADDR 0x2000 +#define AEON_CPU_CTRL_FW_LOAD (BIT(4) | BIT(2) | BIT(1) | BIT(0)) +#define AEON_CPU_CTRL_FW_START BIT(0) + +enum as21xxx_led_event { + VEND1_LED_REG_A_EVENT_ON_10 = 0x0, + VEND1_LED_REG_A_EVENT_ON_100, + VEND1_LED_REG_A_EVENT_ON_1000, + VEND1_LED_REG_A_EVENT_ON_2500, + VEND1_LED_REG_A_EVENT_ON_5000, + VEND1_LED_REG_A_EVENT_ON_10000, + VEND1_LED_REG_A_EVENT_ON_FE_GE, + VEND1_LED_REG_A_EVENT_ON_NG, + VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX, + VEND1_LED_REG_A_EVENT_ON_COLLISION, + VEND1_LED_REG_A_EVENT_BLINK_TX, + VEND1_LED_REG_A_EVENT_BLINK_RX, + VEND1_LED_REG_A_EVENT_BLINK_ACT, + VEND1_LED_REG_A_EVENT_ON_LINK, + VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT, + VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX, + VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT, + VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT, + VEND1_LED_REG_A_EVENT_ON_NG_BLINK_FE_GE, + VEND1_LED_REG_A_EVENT_ON_FD_BLINK_COLLISION, + VEND1_LED_REG_A_EVENT_ON, + VEND1_LED_REG_A_EVENT_OFF, +}; + +struct as21xxx_led_pattern_info { + unsigned int pattern; + u16 val; +}; + +struct as21xxx_priv { + bool parity_status; + /* Protect concurrent IPC access */ + struct mutex ipc_lock; +}; + +static struct as21xxx_led_pattern_info as21xxx_led_supported_pattern[] = { + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10), + .val = VEND1_LED_REG_A_EVENT_ON_10 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_100), + .val = VEND1_LED_REG_A_EVENT_ON_100 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_1000), + .val = VEND1_LED_REG_A_EVENT_ON_1000 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_2500), + .val = VEND1_LED_REG_A_EVENT_ON_2500 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_5000), + .val = VEND1_LED_REG_A_EVENT_ON_5000 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10000), + .val = VEND1_LED_REG_A_EVENT_ON_10000 + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK), + .val = VEND1_LED_REG_A_EVENT_ON_LINK + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000), + .val = VEND1_LED_REG_A_EVENT_ON_FE_GE + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + BIT(TRIGGER_NETDEV_LINK_10000), + .val = VEND1_LED_REG_A_EVENT_ON_NG + }, + { + .pattern = BIT(TRIGGER_NETDEV_FULL_DUPLEX), + .val = VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX + }, + { + .pattern = BIT(TRIGGER_NETDEV_TX), + .val = VEND1_LED_REG_A_EVENT_BLINK_TX + }, + { + .pattern = BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_BLINK_RX + }, + { + .pattern = BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_BLINK_ACT + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + BIT(TRIGGER_NETDEV_LINK_10000), + .val = VEND1_LED_REG_A_EVENT_ON_LINK + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + BIT(TRIGGER_NETDEV_LINK_10000) | + BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + 
BIT(TRIGGER_NETDEV_LINK_10000) | + BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT + }, + { + .pattern = BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + BIT(TRIGGER_NETDEV_LINK_10000) | + BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX), + .val = VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT + } +}; + +static int aeon_firmware_boot(struct phy_device *phydev, const u8 *data, + size_t size) +{ + int i, ret; + u16 val; + + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL, + VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_LOAD); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_FW_START_ADDR, + AEON_BOOT_ADDR); + if (ret) + return ret; + + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD, + 0x3ffc, 0xc000); + if (ret) + return ret; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_MDIO_INDIRECT_STATUS); + if (val > 1) { + phydev_err(phydev, "wrong origin mdio_indirect_status: %x\n", val); + return -EINVAL; + } + + /* Firmware is always aligned to u16 */ + for (i = 0; i < size; i += 2) { + val = data[i + 1] << 8 | data[i]; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_MDIO_INDIRECT_LOAD, val); + if (ret) + return ret; + } + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR, + lower_16_bits(AEON_CPU_BOOT_ADDR)); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR, + upper_16_bits(AEON_CPU_BOOT_ADDR)); + if (ret) + return ret; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL, + VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_START); +} + +static int aeon_firmware_load(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + const struct firmware *fw; + const char *fw_name; + int ret; + + ret = of_property_read_string(dev->of_node, "firmware-name", + &fw_name); + if (ret) + return ret; + + ret = request_firmware(&fw, fw_name, dev); + if (ret) { + phydev_err(phydev, "failed to find FW file %s (%d)\n", + fw_name, ret); + return ret; + } + + ret = aeon_firmware_boot(phydev, fw->data, fw->size); + + release_firmware(fw); + + return ret; +} + +static bool aeon_ipc_ready(u16 val, bool parity_status) +{ + u16 status; + + if (FIELD_GET(AEON_IPC_STS_PARITY, val) != parity_status) + return false; + + status = val & AEON_IPC_STS_STATUS; + + return status != AEON_IPC_STS_STATUS_RCVD && + status != AEON_IPC_STS_STATUS_PROCESS && + status != AEON_IPC_STS_STATUS_BUSY; +} + +static int aeon_ipc_wait_cmd(struct phy_device *phydev, bool parity_status) +{ + u16 val; + + /* Exit condition logic: + * - Wait for parity bit equal + * - Wait for status success, error OR ready + */ + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS, val, + aeon_ipc_ready(val, parity_status), + AEON_IPC_DELAY, AEON_IPC_TIMEOUT, false); +} + +static int aeon_ipc_send_cmd(struct phy_device *phydev, + struct as21xxx_priv *priv, + u16 cmd, u16 *ret_sts) +{ + bool curr_parity; + int ret; + + /* The IPC sync by using a single parity bit. + * Each CMD have alternately this bit set or clear + * to understand correct flow and packet order. 
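The poll-exit condition used by aeon_ipc_wait_cmd() above is compact enough to be worth restating outside the kernel. The sketch below re-expresses aeon_ipc_ready() with BIT()/GENMASK()/FIELD_GET() expanded into plain masks so it can be compiled and sanity-checked stand-alone; the ipc_ready() name and the test values are illustrative only.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define AEON_IPC_STS_PARITY	0x8000u	/* BIT(15) */
#define AEON_IPC_STS_STATUS	0x000fu	/* GENMASK(3, 0) */
#define STS_RCVD	0x1
#define STS_PROCESS	0x2
#define STS_SUCCESS	0x4
#define STS_ERROR	0x8
#define STS_BUSY	0xe
#define STS_READY	0xf

static bool ipc_ready(uint16_t val, bool parity_status)
{
	uint16_t status;

	/* The reply is only valid once its parity bit matches the parity
	 * of the command that was sent. */
	if (!!(val & AEON_IPC_STS_PARITY) != parity_status)
		return false;

	status = val & AEON_IPC_STS_STATUS;

	/* Anything other than "still being handled" terminates the poll:
	 * SUCCESS, ERROR and READY all stop the wait loop. */
	return status != STS_RCVD && status != STS_PROCESS && status != STS_BUSY;
}

int main(void)
{
	assert(!ipc_ready(AEON_IPC_STS_PARITY | STS_SUCCESS, false)); /* wrong parity */
	assert(!ipc_ready(STS_BUSY, false));                          /* still busy */
	assert(ipc_ready(STS_SUCCESS, false));                        /* done */
	assert(ipc_ready(AEON_IPC_STS_PARITY | STS_ERROR, true));     /* error also stops */
	return 0;
}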
+ */ + curr_parity = priv->parity_status; + if (priv->parity_status) + cmd |= AEON_IPC_CMD_PARITY; + + /* Always update parity for next packet */ + priv->parity_status = !priv->parity_status; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_CMD, cmd); + if (ret) + return ret; + + /* Wait for packet to be processed */ + usleep_range(AEON_IPC_DELAY, AEON_IPC_DELAY + 5000); + + /* With no ret_sts, ignore waiting for packet completion + * (ipc parity bit sync) + */ + if (!ret_sts) + return 0; + + ret = aeon_ipc_wait_cmd(phydev, curr_parity); + if (ret) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS); + if (ret < 0) + return ret; + + *ret_sts = ret; + if ((*ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_SUCCESS) + return -EINVAL; + + return 0; +} + +/* If data is NULL, return 0 or negative error. + * If data not NULL, return number of Bytes received from IPC or + * a negative error. + */ +static int aeon_ipc_send_msg(struct phy_device *phydev, + u16 opcode, u16 *data, unsigned int data_len, + u16 *ret_data) +{ + struct as21xxx_priv *priv = phydev->priv; + unsigned int ret_size; + u16 cmd, ret_sts; + int ret; + int i; + + /* IPC have a max of 8 register to transfer data, + * make sure we never exceed this. + */ + if (data_len > AEON_IPC_DATA_MAX) + return -EINVAL; + + for (i = 0; i < data_len / sizeof(u16); i++) + phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i), + data[i]); + + cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, data_len) | + FIELD_PREP(AEON_IPC_CMD_OPCODE, opcode); + + mutex_lock(&priv->ipc_lock); + + ret = aeon_ipc_send_cmd(phydev, priv, cmd, &ret_sts); + if (ret) { + phydev_err(phydev, "failed to send ipc msg for %x: %d\n", + opcode, ret); + goto out; + } + + if (!data) + goto out; + + if ((ret_sts & AEON_IPC_STS_STATUS) == AEON_IPC_STS_STATUS_ERROR) { + ret = -EINVAL; + goto out; + } + + /* Prevent IPC from stack smashing the kernel. + * We can't trust IPC to return a good value and we always + * preallocate space for 16 Bytes. + */ + ret_size = FIELD_GET(AEON_IPC_STS_SIZE, ret_sts); + if (ret_size > AEON_IPC_DATA_MAX) { + ret = -EINVAL; + goto out; + } + + /* Read data from IPC data register for ret_size value from IPC */ + for (i = 0; i < DIV_ROUND_UP(ret_size, sizeof(u16)); i++) { + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i)); + if (ret < 0) + goto out; + + ret_data[i] = ret; + } + + ret = ret_size; + +out: + mutex_unlock(&priv->ipc_lock); + + return ret; +} + +static int aeon_ipc_noop(struct phy_device *phydev, + struct as21xxx_priv *priv, u16 *ret_sts) +{ + u16 cmd; + + cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, 0) | + FIELD_PREP(AEON_IPC_CMD_OPCODE, IPC_CMD_NOOP); + + return aeon_ipc_send_cmd(phydev, priv, cmd, ret_sts); +} + +/* Logic to sync parity bit with IPC. + * We send 2 NOP cmd with same partity and we wait for IPC + * to handle the packet only for the second one. This way + * we make sure we are sync for every next cmd. 
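The IPC command word layout and the alternating parity bit described above are easy to model in isolation. The toy below packs size and opcode the way aeon_ipc_send_cmd() does and flips the software parity flag after every command; ipc_build_cmd() and struct ipc_state are hypothetical stand-ins for the driver code, and the MDIO write itself is replaced by printf().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AEON_IPC_CMD_PARITY	0x8000u			/* BIT(15) */
#define AEON_IPC_CMD_SIZE(n)	(((n) & 0x1fu) << 6)	/* GENMASK(10, 6) */
#define AEON_IPC_CMD_OPCODE(op)	((op) & 0x3fu)		/* GENMASK(5, 0) */

struct ipc_state {
	bool parity_status;
};

/* Build the command word the way aeon_ipc_send_cmd() does: pack size and
 * opcode, tag the word with the current parity, then flip the parity for
 * the next command. */
static uint16_t ipc_build_cmd(struct ipc_state *st, unsigned int size,
			      unsigned int opcode)
{
	uint16_t cmd = AEON_IPC_CMD_SIZE(size) | AEON_IPC_CMD_OPCODE(opcode);

	if (st->parity_status)
		cmd |= AEON_IPC_CMD_PARITY;

	st->parity_status = !st->parity_status;

	return cmd;
}

int main(void)
{
	struct ipc_state st = { .parity_status = false };

	/* Two NOOP commands (opcode 0x0) in a row: the second one carries
	 * the opposite parity bit, which is what lets firmware and driver
	 * agree on command ordering. */
	printf("cmd1 = 0x%04x\n", ipc_build_cmd(&st, 0, 0x0));
	printf("cmd2 = 0x%04x\n", ipc_build_cmd(&st, 0, 0x0));
	return 0;
}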
+ */ +static int aeon_ipc_sync_parity(struct phy_device *phydev, + struct as21xxx_priv *priv) +{ + u16 ret_sts; + int ret; + + mutex_lock(&priv->ipc_lock); + + /* Send NOP with no parity */ + aeon_ipc_noop(phydev, priv, NULL); + + /* Reset packet parity */ + priv->parity_status = false; + + /* Send second NOP with no parity */ + ret = aeon_ipc_noop(phydev, priv, &ret_sts); + + mutex_unlock(&priv->ipc_lock); + + /* We expect to return -EINVAL */ + if (ret != -EINVAL) + return ret; + + if ((ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_READY) { + phydev_err(phydev, "Invalid IPC status on sync parity: %x\n", + ret_sts); + return -EINVAL; + } + + return 0; +} + +static int aeon_ipc_get_fw_version(struct phy_device *phydev) +{ + u16 ret_data[AEON_IPC_DATA_NUM_REGISTERS], data[1]; + char fw_version[AEON_IPC_DATA_MAX + 1]; + int ret; + + data[0] = IPC_INFO_VERSION; + + ret = aeon_ipc_send_msg(phydev, IPC_CMD_INFO, data, + sizeof(data), ret_data); + if (ret < 0) + return ret; + + /* Make sure FW version is NULL terminated */ + memcpy(fw_version, ret_data, ret); + fw_version[ret] = '\0'; + + phydev_info(phydev, "Firmware Version: %s\n", fw_version); + + return 0; +} + +static int aeon_dpc_ra_enable(struct phy_device *phydev) +{ + u16 data[2]; + + data[0] = IPC_CFG_PARAM_DIRECT; + data[1] = IPC_CFG_PARAM_DIRECT_DPC_RA; + + return aeon_ipc_send_msg(phydev, IPC_CMD_CFG_PARAM, data, + sizeof(data), NULL); +} + +static int as21xxx_probe(struct phy_device *phydev) +{ + struct as21xxx_priv *priv; + int ret; + + priv = devm_kzalloc(&phydev->mdio.dev, + sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + phydev->priv = priv; + + ret = devm_mutex_init(&phydev->mdio.dev, + &priv->ipc_lock); + if (ret) + return ret; + + ret = aeon_ipc_sync_parity(phydev, priv); + if (ret) + return ret; + + ret = aeon_ipc_get_fw_version(phydev); + if (ret) + return ret; + + /* Enable PTP clk if not already Enabled */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK, + VEND1_PTP_CLK_EN); + if (ret) + return ret; + + return aeon_dpc_ra_enable(phydev); +} + +static int as21xxx_read_link(struct phy_device *phydev, int *bmcr) +{ + int status; + + /* Normal C22 BMCR report inconsistent data, use + * the mapped C22 in C45 to have more consistent link info. + */ + *bmcr = phy_read_mmd(phydev, MDIO_MMD_AN, + AS21XXX_MDIO_AN_C22 + MII_BMCR); + if (*bmcr < 0) + return *bmcr; + + /* Autoneg is being started, therefore disregard current + * link status and report link as down. + */ + if (*bmcr & BMCR_ANRESTART) { + phydev->link = 0; + return 0; + } + + status = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (status < 0) + return status; + + phydev->link = !!(status & MDIO_STAT1_LSTATUS); + + return 0; +} + +static int as21xxx_read_c22_lpa(struct phy_device *phydev) +{ + int lpagb; + + /* MII_STAT1000 are only filled in the mapped C22 + * in C45, use that to fill lpagb values and check. 
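aeon_ipc_get_fw_version() above copies up to 16 bytes of ASCII out of the eight VEND1_IPC_DATA registers and terminates the string by hand, which is why its local buffer is AEON_IPC_DATA_MAX + 1 bytes. Below is a stand-alone illustration of that copy, with a made-up version string standing in for the IPC reply.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AEON_IPC_DATA_NUM_REGISTERS	8
#define AEON_IPC_DATA_MAX		(AEON_IPC_DATA_NUM_REGISTERS * sizeof(uint16_t))

int main(void)
{
	/* Pretend these are the eight VEND1_IPC_DATA registers read back
	 * for IPC_CMD_INFO / IPC_INFO_VERSION: 16 bytes of ASCII that the
	 * firmware does not necessarily NUL terminate. */
	uint16_t ret_data[AEON_IPC_DATA_NUM_REGISTERS];
	char fw_version[AEON_IPC_DATA_MAX + 1];
	size_t ret_size = sizeof(ret_data);

	memcpy(ret_data, "v1.2.3-aeon-fake", sizeof(ret_data));

	/* The +1 in the buffer size is what lets the driver always
	 * terminate the string, even when the reply uses all 16 bytes. */
	memcpy(fw_version, ret_data, ret_size);
	fw_version[ret_size] = '\0';

	printf("Firmware Version: %s\n", fw_version);
	return 0;
}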
+ */ + lpagb = phy_read_mmd(phydev, MDIO_MMD_AN, + AS21XXX_MDIO_AN_C22 + MII_STAT1000); + if (lpagb < 0) + return lpagb; + + if (lpagb & LPA_1000MSFAIL) { + int adv = phy_read_mmd(phydev, MDIO_MMD_AN, + AS21XXX_MDIO_AN_C22 + MII_CTRL1000); + + if (adv < 0) + return adv; + + if (adv & CTL1000_ENABLE_MASTER) + phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); + else + phydev_err(phydev, "Master/Slave resolution failed\n"); + return -ENOLINK; + } + + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, + lpagb); + + return 0; +} + +static int as21xxx_read_status(struct phy_device *phydev) +{ + int bmcr, old_link = phydev->link; + int ret; + + ret = as21xxx_read_link(phydev, &bmcr); + if (ret) + return ret; + + /* why bother the PHY if nothing can have changed */ + if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) + return 0; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_read_lpa(phydev); + if (ret) + return ret; + + ret = as21xxx_read_c22_lpa(phydev); + if (ret) + return ret; + + phy_resolve_aneg_linkmode(phydev); + } else { + int speed; + + linkmode_zero(phydev->lp_advertising); + + speed = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_SPEED_STATUS); + if (speed < 0) + return speed; + + switch (speed & VEND1_SPEED_STATUS) { + case VEND1_SPEED_10000: + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + break; + case VEND1_SPEED_5000: + phydev->speed = SPEED_5000; + phydev->duplex = DUPLEX_FULL; + break; + case VEND1_SPEED_2500: + phydev->speed = SPEED_2500; + phydev->duplex = DUPLEX_FULL; + break; + case VEND1_SPEED_1000: + phydev->speed = SPEED_1000; + if (bmcr & BMCR_FULLDPLX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + break; + case VEND1_SPEED_100: + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + break; + case VEND1_SPEED_10: + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_FULL; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static int as21xxx_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value) +{ + u16 val = VEND1_LED_REG_A_EVENT_OFF; + + if (index > AEON_MAX_LEDS) + return -EINVAL; + + if (value) + val = VEND1_LED_REG_A_EVENT_ON; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, + VEND1_LED_REG(index), + VEND1_LED_REG_A_EVENT, + FIELD_PREP(VEND1_LED_REG_A_EVENT, val)); +} + +static int as21xxx_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int i; + + if (index > AEON_MAX_LEDS) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++) + if (rules == as21xxx_led_supported_pattern[i].pattern) + return 0; + + return -EOPNOTSUPP; +} + +static int as21xxx_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int i, val; + + if (index > AEON_MAX_LEDS) + return -EINVAL; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_LED_REG(index)); + if (val < 0) + return val; + + val = FIELD_GET(VEND1_LED_REG_A_EVENT, val); + for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++) + if (val == as21xxx_led_supported_pattern[i].val) { + *rules = as21xxx_led_supported_pattern[i].pattern; + return 0; + } + + /* Should be impossible */ + return -EINVAL; +} + +static int as21xxx_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + u16 val = 0; + int i; + + if (index > 
AEON_MAX_LEDS) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++) + if (rules == as21xxx_led_supported_pattern[i].pattern) { + val = as21xxx_led_supported_pattern[i].val; + break; + } + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, + VEND1_LED_REG(index), + VEND1_LED_REG_A_EVENT, + FIELD_PREP(VEND1_LED_REG_A_EVENT, val)); +} + +static int as21xxx_led_polarity_set(struct phy_device *phydev, int index, + unsigned long modes) +{ + bool led_active_low = false; + u16 mask, val = 0; + u32 mode; + + if (index > AEON_MAX_LEDS) + return -EINVAL; + + for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { + switch (mode) { + case PHY_LED_ACTIVE_LOW: + led_active_low = true; + break; + case PHY_LED_ACTIVE_HIGH: /* default mode */ + led_active_low = false; + break; + default: + return -EINVAL; + } + } + + mask = VEND1_GLB_CPU_CTRL_LED_POLARITY(index); + if (led_active_low) + val = VEND1_GLB_CPU_CTRL_LED_POLARITY(index); + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLB_REG_CPU_CTRL, + mask, val); +} + +static int as21xxx_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) +{ + struct as21xxx_priv *priv; + u16 ret_sts; + u32 phy_id; + int ret; + + /* Skip PHY that are not AS21xxx or already have firmware loaded */ + if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX) + return genphy_match_phy_device(phydev, phydrv); + + /* Read PHY ID to handle firmware just loaded */ + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1); + if (ret < 0) + return ret; + phy_id = ret << 16; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID2); + if (ret < 0) + return ret; + phy_id |= ret; + + /* With PHY ID not the generic AS21xxx one assume + * the firmware just loaded + */ + if (phy_id != PHY_ID_AS21XXX) + return phy_id == phydrv->phy_id; + + /* Allocate temp priv and load the firmware */ + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_init(&priv->ipc_lock); + + ret = aeon_firmware_load(phydev); + if (ret) + goto out; + + /* Sync parity... */ + ret = aeon_ipc_sync_parity(phydev, priv); + if (ret) + goto out; + + /* ...and send a third NOOP cmd to wait for firmware finish loading */ + ret = aeon_ipc_noop(phydev, priv, &ret_sts); + if (ret) + goto out; + +out: + mutex_destroy(&priv->ipc_lock); + kfree(priv); + + /* Return can either be 0 or a negative error code. + * Returning 0 here means THIS is NOT a suitable PHY. + * + * For the specific case of the generic Aeonsemi PHY ID that + * needs the firmware the be loaded first to have a correct PHY ID, + * this is OK as a matching PHY ID will be found right after. + * This relies on the driver probe order where the first PHY driver + * probed is the generic one. + */ + return ret; +} + +static struct phy_driver as21xxx_drivers[] = { + { + /* PHY expose in C45 as 0x7500 0x9410 + * before firmware is loaded. + * This driver entry must be attempted first to load + * the firmware and thus update the ID registers. 
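The match logic above rebuilds the 32-bit PHY ID from MII_PHYSID1/MII_PHYSID2 and relies on the whole 0x7500 94xx range collapsing onto a single vendor entry in the MDIO device table. The stand-alone check below verifies that arithmetic; PHY_VENDOR_MASK is the GENMASK(31, 10) value that PHY_ID_MATCH_VENDOR() is assumed to expand to, and is not taken from this patch.

#include <assert.h>
#include <stdint.h>

#define PHY_ID_AS21XXX		0x75009410u
#define PHY_ID_AS21011JB1	0x75009402u
#define PHY_VENDOR_AEONSEMI	0x75009400u
#define PHY_VENDOR_MASK		0xfffffc00u	/* assumed GENMASK(31, 10) */

static uint32_t phy_id_from_regs(uint16_t physid1, uint16_t physid2)
{
	/* Same reassembly as as21xxx_match_phy_device(): MII_PHYSID1 holds
	 * the upper 16 bits, MII_PHYSID2 the lower 16 bits. */
	return ((uint32_t)physid1 << 16) | physid2;
}

int main(void)
{
	/* Before firmware load the PHY reports the generic ID ... */
	assert(phy_id_from_regs(0x7500, 0x9410) == PHY_ID_AS21XXX);
	/* ... afterwards it reports a model-specific one ... */
	assert(phy_id_from_regs(0x7500, 0x9402) == PHY_ID_AS21011JB1);
	/* ... and both fall under the single vendor entry used for
	 * MODULE_DEVICE_TABLE(mdio, as21xxx_tbl). */
	assert((PHY_ID_AS21XXX & PHY_VENDOR_MASK) == PHY_VENDOR_AEONSEMI);
	assert((PHY_ID_AS21011JB1 & PHY_VENDOR_MASK) == PHY_VENDOR_AEONSEMI);
	return 0;
}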
+ */ + PHY_ID_MATCH_EXACT(PHY_ID_AS21XXX), + .name = "Aeonsemi AS21xxx", + .match_phy_device = as21xxx_match_phy_device, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21011JB1), + .name = "Aeonsemi AS21011JB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21011PB1), + .name = "Aeonsemi AS21011PB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21010PB1), + .name = "Aeonsemi AS21010PB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21010JB1), + .name = "Aeonsemi AS21010JB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21210PB1), + .name = "Aeonsemi AS21210PB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21510JB1), + .name = "Aeonsemi AS21510JB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21510PB1), + .name = "Aeonsemi AS21510PB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21511JB1), + .name = "Aeonsemi AS21511JB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + 
.led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21210JB1), + .name = "Aeonsemi AS21210JB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, + { + PHY_ID_MATCH_EXACT(PHY_ID_AS21511PB1), + .name = "Aeonsemi AS21511PB1", + .probe = as21xxx_probe, + .match_phy_device = as21xxx_match_phy_device, + .read_status = as21xxx_read_status, + .led_brightness_set = as21xxx_led_brightness_set, + .led_hw_is_supported = as21xxx_led_hw_is_supported, + .led_hw_control_set = as21xxx_led_hw_control_set, + .led_hw_control_get = as21xxx_led_hw_control_get, + .led_polarity_set = as21xxx_led_polarity_set, + }, +}; +module_phy_driver(as21xxx_drivers); + +static struct mdio_device_id __maybe_unused as21xxx_tbl[] = { + { PHY_ID_MATCH_VENDOR(PHY_VENDOR_AEONSEMI) }, + { } +}; +MODULE_DEVICE_TABLE(mdio, as21xxx_tbl); + +MODULE_DESCRIPTION("Aeonsemi AS21xxx PHY driver"); +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index e81404bf8994..299f9a8f30f4 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -185,14 +185,10 @@ static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } -static int bcm8706_match_phy_device(struct phy_device *phydev) +static int bcm87xx_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { - return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706; -} - -static int bcm8727_match_phy_device(struct phy_device *phydev) -{ - return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727; + return phydev->c45_ids.device_ids[4] == phydrv->phy_id; } static struct phy_driver bcm87xx_driver[] = { @@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = { .read_status = bcm87xx_read_status, .config_intr = bcm87xx_config_intr, .handle_interrupt = bcm87xx_handle_interrupt, - .match_phy_device = bcm8706_match_phy_device, + .match_phy_device = bcm87xx_match_phy_device, }, { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, @@ -217,7 +213,7 @@ static struct phy_driver bcm87xx_driver[] = { .read_status = bcm87xx_read_status, .config_intr = bcm87xx_config_intr, .handle_interrupt = bcm87xx_handle_interrupt, - .match_phy_device = bcm8727_match_phy_device, + .match_phy_device = bcm87xx_match_phy_device, } }; module_phy_driver(bcm87xx_driver); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index c91adf2464c5..033656d574b8 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -131,7 +131,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev, EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr, - struct fixed_phy_status *status, + const struct fixed_phy_status *status, struct gpio_desc *gpiod) { int ret; @@ -160,10 +160,9 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr, return 0; } -int fixed_phy_add(unsigned int irq, int phy_addr, - struct fixed_phy_status *status) +int fixed_phy_add(int phy_addr, 
const struct fixed_phy_status *status) { - return fixed_phy_add_gpiod(irq, phy_addr, status, NULL); + return fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, NULL); } EXPORT_SYMBOL_GPL(fixed_phy_add); @@ -223,8 +222,7 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) } #endif -struct phy_device *fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, +struct phy_device *fixed_phy_register(const struct fixed_phy_status *status, struct device_node *np) { struct fixed_mdio_bus *fmb = &platform_fmb; @@ -246,7 +244,7 @@ struct phy_device *fixed_phy_register(unsigned int irq, if (phy_addr < 0) return ERR_PTR(phy_addr); - ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod); + ret = fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, gpiod); if (ret < 0) { ida_free(&phy_fixed_ida, phy_addr); return ERR_PTR(ret); diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index bbcc7d2b54cd..c0c4f19cfb6a 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -520,12 +520,14 @@ static int ip101a_g_match_phy_device(struct phy_device *phydev, bool ip101a) return ip101a == !ret; } -static int ip101a_match_phy_device(struct phy_device *phydev) +static int ip101a_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return ip101a_g_match_phy_device(phydev, true); } -static int ip101g_match_phy_device(struct phy_device *phydev) +static int ip101g_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return ip101a_g_match_phy_device(phydev, false); } diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 5354c8895163..13e81dff42c1 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -1264,7 +1264,8 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev) return ret + 1; } -static int mv3310_match_phy_device(struct phy_device *phydev) +static int mv3310_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310) @@ -1273,7 +1274,8 @@ static int mv3310_match_phy_device(struct phy_device *phydev) return mv3310_get_number_of_ports(phydev) == 1; } -static int mv3340_match_phy_device(struct phy_device *phydev) +static int mv3340_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310) @@ -1297,12 +1299,14 @@ static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) return !!(val & MDIO_PCS_SPEED_5G) == has_5g; } -static int mv2110_match_phy_device(struct phy_device *phydev) +static int mv2110_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return mv211x_match_phy_device(phydev, true); } -static int mv2111_match_phy_device(struct phy_device *phydev) +static int mv2111_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return mv211x_match_phy_device(phydev, false); } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index f5ccbe33a384..a6bcb0fee863 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -988,7 +988,7 @@ const struct bus_type mdio_bus_type = { }; EXPORT_SYMBOL(mdio_bus_type); -int __init mdio_bus_init(void) +static int __init mdio_bus_init(void) { int ret; @@ -1002,16 +1002,14 @@ int __init mdio_bus_init(void) return ret; } -#if IS_ENABLED(CONFIG_PHYLIB) -void 
mdio_bus_exit(void) +static void __exit mdio_bus_exit(void) { class_unregister(&mdio_bus_class); bus_unregister(&mdio_bus_type); } -EXPORT_SYMBOL_GPL(mdio_bus_exit); -#else -module_init(mdio_bus_init); -/* no module_exit, intentional */ + +subsys_initcall(mdio_bus_init); +module_exit(mdio_bus_exit); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MDIO bus/device layer"); -#endif diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index e747ee63c665..cce3f405d1a4 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -45,6 +45,7 @@ int mdio_device_bus_match(struct device *dev, const struct device_driver *drv) return strcmp(mdiodev->modalias, drv->name) == 0; } +EXPORT_SYMBOL_GPL(mdio_device_bus_match); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr) { diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig index 4308002bb82c..9f30a91be8dd 100644 --- a/drivers/net/phy/mediatek/Kconfig +++ b/drivers/net/phy/mediatek/Kconfig @@ -1,6 +1,14 @@ # SPDX-License-Identifier: GPL-2.0-only -config MTK_NET_PHYLIB - tristate +config MEDIATEK_2P5GE_PHY + tristate "MediaTek 2.5Gb Ethernet PHYs" + depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST + select MTK_NET_PHYLIB + help + Supports MediaTek SoC built-in 2.5Gb Ethernet PHYs. + + This will load necessary firmware and add appropriate time delay. + Accelerate this procedure through internal pbus instead of MDIO + bus. Certain link-up issues will also be fixed here. config MEDIATEK_GE_PHY tristate "MediaTek Gigabit Ethernet PHYs" @@ -26,3 +34,6 @@ config MEDIATEK_GE_SOC_PHY the MT7981 and MT7988 SoCs. These PHYs need calibration data present in the SoCs efuse and will dynamically calibrate VCM (common-mode voltage) during startup. 
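With the MDIO bus code now built as its own module, mdio_device_bus_match() is exported above so the bus core can keep using it. For non-PHY MDIO devices the match boils down to a modalias/driver-name string comparison, sketched stand-alone below; the structs and names are toy stand-ins, and the real function also rejects PHY drivers via the MDIO_DEVICE_IS_PHY flag before doing this comparison.

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Toy stand-ins for the kernel structures involved in the match. */
struct toy_mdio_device {
	char modalias[32];
};

struct toy_device_driver {
	const char *name;
};

/* The non-PHY branch of mdio_device_bus_match(): a plain string compare
 * between the device's modalias and the driver name. */
static bool mdio_modalias_match(const struct toy_mdio_device *mdiodev,
				const struct toy_device_driver *drv)
{
	return strcmp(mdiodev->modalias, drv->name) == 0;
}

int main(void)
{
	struct toy_mdio_device dev = { .modalias = "mdio-mux-mmioreg" };
	struct toy_device_driver drv = { .name = "mdio-mux-mmioreg" };
	struct toy_device_driver other = { .name = "some-other-driver" };

	assert(mdio_modalias_match(&dev, &drv));
	assert(!mdio_modalias_match(&dev, &other));
	return 0;
}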
+ +config MTK_NET_PHYLIB + tristate diff --git a/drivers/net/phy/mediatek/Makefile b/drivers/net/phy/mediatek/Makefile index 814879d0abe5..ac57ecc799fc 100644 --- a/drivers/net/phy/mediatek/Makefile +++ b/drivers/net/phy/mediatek/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o +obj-$(CONFIG_MEDIATEK_2P5GE_PHY) += mtk-2p5ge.o obj-$(CONFIG_MEDIATEK_GE_PHY) += mtk-ge.o obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mtk-ge-soc.o +obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c new file mode 100644 index 000000000000..e147eab523ef --- /dev/null +++ b/drivers/net/phy/mediatek/mtk-2p5ge.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include <linux/bitfield.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/phy.h> + +#include "mtk.h" + +#define MTK_2P5GPHY_ID_MT7988 0x00339c11 + +#define MT7988_2P5GE_PMB_FW "mediatek/mt7988/i2p5ge-phy-pmb.bin" +#define MT7988_2P5GE_PMB_FW_SIZE 0x20000 +#define MT7988_2P5GE_PMB_FW_BASE 0x0f100000 +#define MT7988_2P5GE_PMB_FW_LEN 0x20000 +#define MTK_2P5GPHY_MCU_CSR_BASE 0x0f0f0000 +#define MTK_2P5GPHY_MCU_CSR_LEN 0x20 +#define MD32_EN_CFG 0x18 +#define MD32_EN BIT(0) + +#define BASE100T_STATUS_EXTEND 0x10 +#define BASE1000T_STATUS_EXTEND 0x11 +#define EXTEND_CTRL_AND_STATUS 0x16 + +#define PHY_AUX_CTRL_STATUS 0x1d +#define PHY_AUX_DPX_MASK GENMASK(5, 5) +#define PHY_AUX_SPEED_MASK GENMASK(4, 2) + +/* Registers on MDIO_MMD_VEND1 */ +#define MTK_PHY_LPI_PCS_DSP_CTRL 0x121 +#define MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK GENMASK(12, 8) + +#define MTK_PHY_HOST_CMD1 0x800e +#define MTK_PHY_HOST_CMD2 0x800f +/* Registers on Token Ring debug nodes */ +/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */ +#define AUTO_NP_10XEN BIT(6) + +enum { + PHY_AUX_SPD_10 = 0, + PHY_AUX_SPD_100, + PHY_AUX_SPD_1000, + PHY_AUX_SPD_2500, +}; + +static int mt798x_2p5ge_phy_load_fw(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + void __iomem *mcu_csr_base, *pmb_addr; + const struct firmware *fw; + int ret, i; + u32 reg; + + pmb_addr = ioremap(MT7988_2P5GE_PMB_FW_BASE, MT7988_2P5GE_PMB_FW_LEN); + if (!pmb_addr) + return -ENOMEM; + mcu_csr_base = ioremap(MTK_2P5GPHY_MCU_CSR_BASE, + MTK_2P5GPHY_MCU_CSR_LEN); + if (!mcu_csr_base) { + ret = -ENOMEM; + goto free_pmb; + } + + ret = request_firmware_direct(&fw, MT7988_2P5GE_PMB_FW, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, ret: %d\n", + MT7988_2P5GE_PMB_FW, ret); + goto free; + } + + if (fw->size != MT7988_2P5GE_PMB_FW_SIZE) { + dev_err(dev, "Firmware size 0x%zx != 0x%x\n", + fw->size, MT7988_2P5GE_PMB_FW_SIZE); + ret = -EINVAL; + goto release_fw; + } + + reg = readw(mcu_csr_base + MD32_EN_CFG); + if (reg & MD32_EN) { + phy_set_bits(phydev, MII_BMCR, BMCR_RESET); + usleep_range(10000, 11000); + } + phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); + + /* Write magic number to safely stall MCU */ + phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD1, 0x1100); + phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD2, 0x00df); + + for (i = 0; i < MT7988_2P5GE_PMB_FW_SIZE - 1; i += 4) + writel(*((uint32_t *)(fw->data + i)), pmb_addr + i); + + writew(reg & ~MD32_EN, mcu_csr_base + MD32_EN_CFG); + writew(reg | MD32_EN, mcu_csr_base + MD32_EN_CFG); + phy_set_bits(phydev, MII_BMCR, BMCR_RESET); + /* We need a delay here to stabilize 
initialization of MCU */ + usleep_range(7000, 8000); + + dev_info(dev, "Firmware date code: %x/%x/%x, version: %x.%x\n", + be16_to_cpu(*((__be16 *)(fw->data + + MT7988_2P5GE_PMB_FW_SIZE - 8))), + *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 6), + *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 5), + *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 2), + *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 1)); + +release_fw: + release_firmware(fw); +free: + iounmap(mcu_csr_base); +free_pmb: + iounmap(pmb_addr); + + return ret; +} + +static int mt798x_2p5ge_phy_config_init(struct phy_device *phydev) +{ + /* Check if PHY interface type is compatible */ + if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL) + return -ENODEV; + + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LPI_PCS_DSP_CTRL, + MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK, 0); + + /* Enable 16-bit next page exchange bit if 1000-BT isn't advertising */ + mtk_tr_modify(phydev, 0x0, 0xf, 0x3c, AUTO_NP_10XEN, + FIELD_PREP(AUTO_NP_10XEN, 0x1)); + + /* Enable HW auto downshift */ + phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1, + MTK_PHY_AUX_CTRL_AND_STATUS, + 0, MTK_PHY_ENABLE_DOWNSHIFT); + + return 0; +} + +static int mt798x_2p5ge_phy_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + u32 adv; + int ret; + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + /* Clause 45 doesn't define 1000BaseT support. Use Clause 22 instead in + * our design. + */ + adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + ret = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL, adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} + +static int mt798x_2p5ge_phy_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_pma_read_abilities(phydev); + if (ret) + return ret; + + /* This phy can't handle collision, and neither can (XFI)MAC it's + * connected to. Although it can do HDX handshake, it doesn't support + * CSMA/CD that HDX requires. + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->supported); + + return 0; +} + +static int mt798x_2p5ge_phy_read_status(struct phy_device *phydev) +{ + int ret; + + /* When MDIO_STAT1_LSTATUS is raised genphy_c45_read_link(), this phy + * actually hasn't finished AN. So use CL22's link update function + * instead. + */ + ret = genphy_update_link(phydev); + if (ret) + return ret; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + /* We'll read link speed through vendor specific registers down below. + * So remove phy_resolve_aneg_linkmode (AN on) & genphy_c45_read_pma + * (AN off). + */ + if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { + ret = genphy_c45_read_lpa(phydev); + if (ret < 0) + return ret; + + /* Clause 45 doesn't define 1000BaseT support. Read the link + * partner's 1G advertisement via Clause 22. 
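Both this driver and the as21xxx code earlier fall back to the Clause 22 MII_STAT1000 register for the link partner's 1000BASE-T bits, since Clause 45 has no equivalent. Below is a stand-alone decode of that register using the standard LPA_1000* bit values from include/uapi/linux/mii.h; decode_stat1000() is an illustrative stand-in for mii_stat1000_mod_linkmode_lpa_t().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MII_STAT1000 link-partner bits (include/uapi/linux/mii.h). */
#define LPA_1000FULL	0x0800
#define LPA_1000HALF	0x0400
#define LPA_1000MSFAIL	0x8000

static void decode_stat1000(uint16_t stat1000, bool *lp_1000full,
			    bool *lp_1000half)
{
	*lp_1000full = !!(stat1000 & LPA_1000FULL);
	*lp_1000half = !!(stat1000 & LPA_1000HALF);
}

int main(void)
{
	uint16_t stat1000 = LPA_1000FULL | LPA_1000HALF;
	bool full, half;

	decode_stat1000(stat1000, &full, &half);
	printf("link partner 1000BASE-T full: %d, half: %d\n", full, half);

	/* LPA_1000MSFAIL is the master/slave resolution failure flag that
	 * as21xxx_read_c22_lpa() treats as a hard error. */
	printf("master/slave resolution failed: %d\n",
	       !!(stat1000 & LPA_1000MSFAIL));
	return 0;
}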
+ */ + ret = phy_read(phydev, MII_STAT1000); + if (ret < 0) + return ret; + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret); + } else if (phydev->autoneg == AUTONEG_DISABLE) { + linkmode_zero(phydev->lp_advertising); + } + + if (phydev->link) { + ret = phy_read(phydev, PHY_AUX_CTRL_STATUS); + if (ret < 0) + return ret; + + switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) { + case PHY_AUX_SPD_10: + phydev->speed = SPEED_10; + break; + case PHY_AUX_SPD_100: + phydev->speed = SPEED_100; + break; + case PHY_AUX_SPD_1000: + phydev->speed = SPEED_1000; + break; + case PHY_AUX_SPD_2500: + phydev->speed = SPEED_2500; + break; + } + + phydev->duplex = DUPLEX_FULL; + phydev->rate_matching = RATE_MATCH_PAUSE; + } + + return 0; +} + +static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev, + phy_interface_t iface) +{ + return RATE_MATCH_PAUSE; +} + +static int mt798x_2p5ge_phy_probe(struct phy_device *phydev) +{ + struct pinctrl *pinctrl; + int ret; + + switch (phydev->drv->phy_id) { + case MTK_2P5GPHY_ID_MT7988: + /* This built-in 2.5GbE hardware only sets MDIO_DEVS_PMAPMD. + * Set the rest by this driver since PCS/AN/VEND1/VEND2 MDIO + * manageable devices actually exist. + */ + phydev->c45_ids.mmds_present |= MDIO_DEVS_PCS | + MDIO_DEVS_AN | + MDIO_DEVS_VEND1 | + MDIO_DEVS_VEND2; + break; + default: + return -EINVAL; + } + + ret = mt798x_2p5ge_phy_load_fw(phydev); + if (ret < 0) + return ret; + + /* Setup LED */ + phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED_ON_POLARITY | MTK_PHY_LED_ON_LINK10 | + MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK1000 | + MTK_PHY_LED_ON_LINK2500); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL, + MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX); + + /* Switch pinctrl after setting polarity to avoid bogus blinking */ + pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led"); + if (IS_ERR(pinctrl)) + dev_err(&phydev->mdio.dev, "Fail to set LED pins!\n"); + + return 0; +} + +static struct phy_driver mtk_2p5gephy_driver[] = { + { + PHY_ID_MATCH_MODEL(MTK_2P5GPHY_ID_MT7988), + .name = "MediaTek MT7988 2.5GbE PHY", + .probe = mt798x_2p5ge_phy_probe, + .config_init = mt798x_2p5ge_phy_config_init, + .config_aneg = mt798x_2p5ge_phy_config_aneg, + .get_features = mt798x_2p5ge_phy_get_features, + .read_status = mt798x_2p5ge_phy_read_status, + .get_rate_matching = mt798x_2p5ge_phy_get_rate_matching, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_page = mtk_phy_read_page, + .write_page = mtk_phy_write_page, + }, +}; + +module_phy_driver(mtk_2p5gephy_driver); + +static struct mdio_device_id __maybe_unused mtk_2p5ge_phy_tbl[] = { + { PHY_ID_MATCH_VENDOR(0x00339c00) }, + { } +}; + +MODULE_DESCRIPTION("MediaTek 2.5Gb Ethernet PHY driver"); +MODULE_AUTHOR("SkyLake Huang <SkyLake.Huang@mediatek.com>"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(mdio, mtk_2p5ge_phy_tbl); +MODULE_FIRMWARE(MT7988_2P5GE_PMB_FW); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index c2e5be404f07..64aa03aed770 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -768,7 +768,8 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev, return !ret; } -static int ksz8051_match_phy_device(struct phy_device *phydev) +static int ksz8051_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return ksz8051_ksz8795_match_phy_device(phydev, true); } @@ -888,7 +889,8 @@ static int ksz8061_config_init(struct phy_device *phydev) return 
kszphy_config_init(phydev); } -static int ksz8795_match_phy_device(struct phy_device *phydev) +static int ksz8795_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return ksz8051_ksz8795_match_phy_device(phydev, false); } diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index f11dd32494c3..4c6d905f0a9f 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -19,7 +19,6 @@ #include "nxp-c45-tja11xx.h" -#define PHY_ID_MASK GENMASK(31, 4) /* Same id: TJA1103, TJA1104 */ #define PHY_ID_TJA_1103 0x001BB010 /* Same id: TJA1120, TJA1121 */ @@ -1966,28 +1965,24 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev) return macsec_ability; } -static int tja1103_match_phy_device(struct phy_device *phydev) +static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { - return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && - !nxp_c45_macsec_ability(phydev); -} + if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, + phydrv->phy_id_mask)) + return 0; -static int tja1104_match_phy_device(struct phy_device *phydev) -{ - return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && - nxp_c45_macsec_ability(phydev); + return !nxp_c45_macsec_ability(phydev); } -static int tja1120_match_phy_device(struct phy_device *phydev) +static int tja11xx_macsec_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { - return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) && - !nxp_c45_macsec_ability(phydev); -} + if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, + phydrv->phy_id_mask)) + return 0; -static int tja1121_match_phy_device(struct phy_device *phydev) -{ - return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) && - nxp_c45_macsec_ability(phydev); + return nxp_c45_macsec_ability(phydev); } static const struct nxp_c45_regmap tja1120_regmap = { @@ -2060,6 +2055,7 @@ static const struct nxp_c45_phy_data tja1120_phy_data = { static struct phy_driver nxp_c45_driver[] = { { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), .name = "NXP C45 TJA1103", .get_features = nxp_c45_get_features, .driver_data = &tja1103_phy_data, @@ -2081,9 +2077,10 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, - .match_phy_device = tja1103_match_phy_device, + .match_phy_device = tja11xx_no_macsec_match_phy_device, }, { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), .name = "NXP C45 TJA1104", .get_features = nxp_c45_get_features, .driver_data = &tja1103_phy_data, @@ -2105,9 +2102,10 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, - .match_phy_device = tja1104_match_phy_device, + .match_phy_device = tja11xx_macsec_match_phy_device, }, { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), .name = "NXP C45 TJA1120", .get_features = nxp_c45_get_features, .driver_data = &tja1120_phy_data, @@ -2130,9 +2128,10 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, - .match_phy_device = tja1120_match_phy_device, + .match_phy_device = tja11xx_no_macsec_match_phy_device, }, { + PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), .name = "NXP C45 TJA1121", .get_features = nxp_c45_get_features, .driver_data = &tja1120_phy_data, @@ -2155,7 +2154,7 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = 
nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, - .match_phy_device = tja1121_match_phy_device, + .match_phy_device = tja11xx_macsec_match_phy_device, }, }; diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index 07e94a2478ac..3c38a8ddae2f 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -651,12 +651,14 @@ static int tja1102_match_phy_device(struct phy_device *phydev, bool port0) return !ret; } -static int tja1102_p0_match_phy_device(struct phy_device *phydev) +static int tja1102_p0_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return tja1102_match_phy_device(phydev, true); } -static int tja1102_p1_match_phy_device(struct phy_device *phydev) +static int tja1102_p1_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return tja1102_match_phy_device(phydev, false); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 2eb735e68dd8..0f6f86252622 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -543,20 +543,26 @@ static int phy_scan_fixups(struct phy_device *phydev) return 0; } -static int phy_bus_match(struct device *dev, const struct device_driver *drv) +/** + * genphy_match_phy_device - match a PHY device with a PHY driver + * @phydev: target phy_device struct + * @phydrv: target phy_driver struct + * + * Description: Checks whether the given PHY device matches the specified + * PHY driver. For Clause 45 PHYs, iterates over the available device + * identifiers and compares them against the driver's expected PHY ID, + * applying the provided mask. For Clause 22 PHYs, a direct ID comparison + * is performed. + * + * Return: 1 if the PHY device matches the driver, 0 otherwise. 
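The kernel-doc above describes the new shared default; its core is phy_id_compare() plus a walk over the Clause 45 MMD identifiers, skipping the 0xffffffff "no device" slots. Below is a stand-alone model of that decision, with hypothetical MMD IDs and a PHY_ID_MATCH_MODEL-style mask supplied by hand.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same comparison phy_id_compare() performs: only the bits selected by the
 * driver's mask have to agree. */
static bool phy_id_compare(uint32_t id1, uint32_t id2, uint32_t mask)
{
	return !((id1 ^ id2) & mask);
}

/* Simplified model of the new genphy_match_phy_device() default: for C45
 * PHYs every populated MMD identifier is tried against the driver ID, for
 * C22 the single phy_id is compared directly. */
static bool match(const uint32_t *c45_ids, int num_ids, uint32_t c22_id,
		  bool is_c45, uint32_t drv_id, uint32_t drv_mask)
{
	if (is_c45) {
		for (int i = 1; i < num_ids; i++) {
			if (c45_ids[i] == 0xffffffff)
				continue;
			if (phy_id_compare(c45_ids[i], drv_id, drv_mask))
				return true;
		}
		return false;
	}

	return phy_id_compare(c22_id, drv_id, drv_mask);
}

int main(void)
{
	/* Hypothetical MMD IDs: slot 1 empty, slot 2 populated. */
	uint32_t ids[3] = { 0, 0xffffffff, 0x001bb010 };

	/* PHY_ID_MATCH_MODEL-style mask: ignore the 4-bit revision field. */
	assert(match(ids, 3, 0, true, 0x001bb013, 0xfffffff0));
	assert(!match(ids, 3, 0x001cc915, false, 0x001bb010, 0xfffffff0));
	return 0;
}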
+ */ +int genphy_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { - struct phy_device *phydev = to_phy_device(dev); - const struct phy_driver *phydrv = to_phy_driver(drv); - const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); - int i; - - if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)) - return 0; - - if (phydrv->match_phy_device) - return phydrv->match_phy_device(phydev); - if (phydev->is_c45) { + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; + for (i = 1; i < num_ids; i++) { if (phydev->c45_ids.device_ids[i] == 0xffffffff) continue; @@ -565,11 +571,27 @@ static int phy_bus_match(struct device *dev, const struct device_driver *drv) phydrv->phy_id, phydrv->phy_id_mask)) return 1; } + return 0; - } else { - return phy_id_compare(phydev->phy_id, phydrv->phy_id, - phydrv->phy_id_mask); } + + return phy_id_compare(phydev->phy_id, phydrv->phy_id, + phydrv->phy_id_mask); +} +EXPORT_SYMBOL_GPL(genphy_match_phy_device); + +static int phy_bus_match(struct device *dev, const struct device_driver *drv) +{ + struct phy_device *phydev = to_phy_device(dev); + const struct phy_driver *phydrv = to_phy_driver(drv); + + if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)) + return 0; + + if (phydrv->match_phy_device) + return phydrv->match_phy_device(phydev, phydrv); + + return genphy_match_phy_device(phydev, phydrv); } static ssize_t @@ -3557,19 +3579,15 @@ static int __init phy_init(void) phylib_register_stubs(); rtnl_unlock(); - rc = mdio_bus_init(); - if (rc) - goto err_ethtool_phy_ops; - rc = phy_caps_init(); if (rc) - goto err_mdio_bus; + goto err_ethtool_phy_ops; features_init(); rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE); if (rc) - goto err_mdio_bus; + goto err_ethtool_phy_ops; rc = phy_driver_register(&genphy_driver, THIS_MODULE); if (rc) @@ -3579,8 +3597,6 @@ static int __init phy_init(void) err_c45: phy_driver_unregister(&genphy_c45_driver); -err_mdio_bus: - mdio_bus_exit(); err_ethtool_phy_ops: rtnl_lock(); phylib_unregister_stubs(); @@ -3594,7 +3610,6 @@ static void __exit phy_exit(void) { phy_driver_unregister(&genphy_c45_driver); phy_driver_unregister(&genphy_driver); - mdio_bus_exit(); rtnl_lock(); phylib_unregister_stubs(); ethtool_set_ethtool_phy_ops(NULL); diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c index 301fbe141b9b..c3dcb6257430 100644 --- a/drivers/net/phy/realtek/realtek_main.c +++ b/drivers/net/phy/realtek/realtek_main.c @@ -158,6 +158,7 @@ #define RTL_8221B_VB_CG 0x001cc849 #define RTL_8221B_VN_CG 0x001cc84a #define RTL_8251B 0x001cc862 +#define RTL_8261C 0x001cc890 /* RTL8211E and RTL8211F support up to three LEDs */ #define RTL8211x_LED_COUNT 3 @@ -1314,13 +1315,15 @@ static bool rtlgen_supports_mmd(struct phy_device *phydev) return val > 0; } -static int rtlgen_match_phy_device(struct phy_device *phydev) +static int rtlgen_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return phydev->phy_id == RTL_GENERIC_PHYID && !rtlgen_supports_2_5gbps(phydev); } -static int rtl8226_match_phy_device(struct phy_device *phydev) +static int rtl8226_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return phydev->phy_id == RTL_GENERIC_PHYID && rtlgen_supports_2_5gbps(phydev) && @@ -1336,32 +1339,38 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, return !is_c45 && (id == phydev->phy_id); } -static int rtl8221b_match_phy_device(struct phy_device *phydev) +static int 
rtl8221b_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev); } -static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev) +static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false); } -static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev) +static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true); } -static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev) +static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false); } -static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev) +static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); } -static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev) +static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { if (phydev->is_c45) return false; @@ -1370,6 +1379,7 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev) case RTL_GENERIC_PHYID: case RTL_8221B: case RTL_8251B: + case RTL_8261C: case 0x001cc841: break; default: @@ -1379,7 +1389,8 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev) return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev); } -static int rtl8251b_c45_match_phy_device(struct phy_device *phydev) +static int rtl8251b_c45_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return rtlgen_is_c45_match(phydev, RTL_8251B, true); } diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 752d4bf7bb99..46c5ff7d7b56 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -67,7 +67,8 @@ static int teranetics_read_status(struct phy_device *phydev) return 0; } -static int teranetics_match_phy_device(struct phy_device *phydev) +static int teranetics_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv) { return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; } diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index d8fc0c79745d..b75ceb90359f 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change) struct team_port *port; int inc; - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) { + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) { if (change & IFF_PROMISC) { inc = dev->flags & IFF_PROMISC ? 
1 : -1; dev_set_promiscuity(port->dev, inc); @@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change) dev_set_allmulti(port->dev, inc); } } - rcu_read_unlock(); + mutex_unlock(&team->lock); } static void team_set_rx_mode(struct net_device *dev) diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index ff5be2cbf17b..453a2cf82753 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -30,10 +30,13 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely(ret < size)) { + ret = ret < 0 ? ret : -ENODATA; + netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); + } return ret; } @@ -46,10 +49,13 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value, ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely(ret < size)) { + ret = ret < 0 ? ret : -ENODATA; + netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); + } return ret; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 58e3589e3b89..480bbc0f2d8f 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2640,7 +2640,7 @@ static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev) netdev_info(dev->net, "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n"); - return fixed_phy_register(PHY_POLL, &fphy_status, NULL); + return fixed_phy_register(&fphy_status, NULL); } /** diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3df6aabc7e33..c676979c7ab9 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3607,8 +3607,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) struct vmxnet3_adapter *adapter = netdev_priv(netdev); int err = 0; - WRITE_ONCE(netdev->mtu, new_mtu); - /* * Reset_work may be in the middle of resetting the device, wait for its * completion. 
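The .match_phy_device conversions above share one theme: the callback now receives the phy_driver being matched, and the ID comparison formerly buried in phy_bus_match() is exported as genphy_match_phy_device() so drivers can fall back to it. A minimal sketch of how a driver might use the new signature; the "acme" names and PHY ID below are invented for illustration and do not correspond to any real device:

/* Sketch only: invented driver, not part of this series. */
#include <linux/module.h>
#include <linux/phy.h>

#define ACME_PHY_ID	0x00112230

static int acme_match_phy_device(struct phy_device *phydev,
				 const struct phy_driver *phydrv)
{
	/* Reuse the generic ID/mask comparison, then add a
	 * device-specific constraint (here: Clause 22 only).
	 */
	return genphy_match_phy_device(phydev, phydrv) && !phydev->is_c45;
}

static struct phy_driver acme_driver[] = {
	{
		PHY_ID_MATCH_MODEL(ACME_PHY_ID),
		.name		  = "ACME example PHY",
		.match_phy_device = acme_match_phy_device,
	},
};
module_phy_driver(acme_driver);

MODULE_DESCRIPTION("Example .match_phy_device callback");
MODULE_LICENSE("GPL");

Because the expected ID and mask now arrive through phydrv, one match routine can serve several driver entries instead of needing a per-ID wrapper.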
@@ -3622,6 +3620,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) /* we need to re-create the rx queue based on the new mtu */ vmxnet3_rq_destroy_all(adapter); + WRITE_ONCE(netdev->mtu, new_mtu); vmxnet3_adjust_rx_ring_size(adapter); err = vmxnet3_rq_create_all(adapter); if (err) { @@ -3638,6 +3637,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) "Closing it\n", err); goto out; } + } else { + WRITE_ONCE(netdev->mtu, new_mtu); } out: diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 4fc81ae17a8a..a89a7491a76c 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -80,7 +80,7 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, static inline unsigned int ath10k_set_ring_byte(unsigned int offset, - struct ath10k_hw_ce_regs_addr_map *addr_map) + const struct ath10k_hw_ce_regs_addr_map *addr_map) { return ((offset << addr_map->lsb) & addr_map->mask); } @@ -203,7 +203,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -217,7 +217,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -231,7 +231,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs; u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ctrl_regs->addr); @@ -313,7 +313,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, @@ -325,7 +325,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr, @@ -337,7 +337,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, @@ -349,7 +349,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int n) { - struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; + const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr; u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr); 
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr, @@ -360,7 +360,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar, static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -372,7 +372,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar, static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; + const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -384,7 +384,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar, static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr); @@ -396,7 +396,7 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar, static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, u32 ce_ctrl_addr) { - struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; + const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs; u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr); @@ -410,7 +410,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar, u32 ce_ctrl_addr, unsigned int mask) { - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask); } @@ -1230,7 +1230,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) { struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; - struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; + const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs; u32 ctrl_addr = ce_state->ctrl_addr; /* diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 8fafe096adff..84b35a22fc23 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -212,40 +212,40 @@ const struct ath10k_hw_regs wcn3990_regs = { .pcie_intr_fw_mask = 0x00100000, }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { .msb = 0x00000010, .lsb = 0x00000010, .mask = GENMASK(17, 17), }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { .msb = 0x00000012, .lsb = 0x00000012, .mask = GENMASK(18, 18), }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { .msb = 0x00000000, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { +static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { .addr = 0x00000018, .src_ring = &wcn3990_src_ring, .dst_ring = &wcn3990_dst_ring, .dmax = &wcn3990_dmax, }; 
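The ath10k hunks here are a mechanical const-ification: the copy-engine register maps are fixed tables, so declaring them const (and const-qualifying the pointers in accessors such as ath10k_set_ring_byte()) moves them into read-only data and turns any accidental write into a build error. A stripped-down sketch of the pattern, using an invented register block rather than the real ath10k layout:

/* Sketch only: the register map below is made up. */
#include <linux/bits.h>
#include <linux/types.h>

struct example_reg_map {
	u32 addr;
	u32 lsb;
	u32 mask;
};

/* const lets the compiler place the table in .rodata. */
static const struct example_reg_map example_ctrl = {
	.addr = 0x0010,
	.lsb  = 0,
	.mask = GENMASK(15, 0),
};

static u32 example_encode(u32 val, const struct example_reg_map *map)
{
	/* Same shape as ath10k_set_ring_byte() above. */
	return (val << map->lsb) & map->mask;
}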
-static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { .mask = GENMASK(0, 0), }; -static struct ath10k_hw_ce_host_ie wcn3990_host_ie = { +static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = { .copy_complete = &wcn3990_host_ie_cc, }; -static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { +static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { .dstr_lmask = 0x00000010, .dstr_hmask = 0x00000008, .srcr_lmask = 0x00000004, @@ -255,7 +255,7 @@ static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { .addr = 0x00000030, }; -static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { +static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { .axi_err = 0x00000100, .dstr_add_err = 0x00000200, .srcr_len_err = 0x00000100, @@ -266,19 +266,19 @@ static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { .addr = 0x00000038, }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { .msb = 0x00000000, .lsb = 0x00000010, .mask = GENMASK(31, 16), }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { .msb = 0x0000000f, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { +static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { .addr = 0x0000004c, .low_rst = 0x00000000, .high_rst = 0x00000000, @@ -286,18 +286,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { .wm_high = &wcn3990_src_wm_high, }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { .lsb = 0x00000010, .mask = GENMASK(31, 16), }; -static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { +static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { .msb = 0x0000000f, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { +static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { .addr = 0x00000050, .low_rst = 0x00000000, .high_rst = 0x00000000, @@ -305,7 +305,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { .wm_high = &wcn3990_dst_wm_high, }; -static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { +static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { .shift = 19, .mask = 0x00080000, .enable = 0x00000000, @@ -344,25 +344,25 @@ const struct ath10k_hw_values wcn3990_values = { .ce_desc_meta_data_lsb = 4, }; -static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { +static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { .msb = 0x00000010, .lsb = 0x00000010, .mask = GENMASK(16, 16), }; -static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = { +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = { .msb = 0x00000011, .lsb = 0x00000011, .mask = GENMASK(17, 17), }; -static struct ath10k_hw_ce_regs_addr_map qcax_dmax = { +static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = { .msb = 0x0000000f, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = { +static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = { .addr = 0x00000010, .hw_mask = 0x0007ffff, .sw_mask = 0x0007ffff, @@ -375,31 +375,31 @@ static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = { .dmax = &qcax_dmax, }; -static struct ath10k_hw_ce_regs_addr_map 
qcax_cmd_halt_status = { +static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = { .msb = 0x00000003, .lsb = 0x00000003, .mask = GENMASK(3, 3), }; -static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = { +static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = { .msb = 0x00000000, .mask = GENMASK(0, 0), .status_reset = 0x00000000, .status = &qcax_cmd_halt_status, }; -static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = { +static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = { .msb = 0x00000000, .lsb = 0x00000000, .mask = GENMASK(0, 0), }; -static struct ath10k_hw_ce_host_ie qcax_host_ie = { +static const struct ath10k_hw_ce_host_ie qcax_host_ie = { .copy_complete_reset = 0x00000000, .copy_complete = &qcax_host_ie_cc, }; -static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = { +static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = { .dstr_lmask = 0x00000010, .dstr_hmask = 0x00000008, .srcr_lmask = 0x00000004, @@ -409,7 +409,7 @@ static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = { .addr = 0x00000030, }; -static struct ath10k_hw_ce_misc_regs qcax_misc_reg = { +static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = { .axi_err = 0x00000400, .dstr_add_err = 0x00000200, .srcr_len_err = 0x00000100, @@ -420,19 +420,19 @@ static struct ath10k_hw_ce_misc_regs qcax_misc_reg = { .addr = 0x00000038, }; -static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = { +static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = { .msb = 0x0000001f, .lsb = 0x00000010, .mask = GENMASK(31, 16), }; -static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = { +static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = { .msb = 0x0000000f, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = { +static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = { .addr = 0x0000004c, .low_rst = 0x00000000, .high_rst = 0x00000000, @@ -440,18 +440,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = { .wm_high = &qcax_src_wm_high, }; -static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = { +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = { .lsb = 0x00000010, .mask = GENMASK(31, 16), }; -static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = { +static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = { .msb = 0x0000000f, .lsb = 0x00000000, .mask = GENMASK(15, 0), }; -static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { +static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { .addr = 0x00000050, .low_rst = 0x00000000, .high_rst = 0x00000000, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 442091c6dfd2..7ffa1fbe2874 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -289,19 +289,22 @@ struct ath10k_hw_ce_ctrl1 { u32 sw_wr_mask; u32 reset_mask; u32 reset; - struct ath10k_hw_ce_regs_addr_map *src_ring; - struct ath10k_hw_ce_regs_addr_map *dst_ring; - struct ath10k_hw_ce_regs_addr_map *dmax; }; + const struct ath10k_hw_ce_regs_addr_map *src_ring; + const struct ath10k_hw_ce_regs_addr_map *dst_ring; + const struct ath10k_hw_ce_regs_addr_map *dmax; +}; struct ath10k_hw_ce_cmd_halt { u32 status_reset; u32 msb; u32 mask; - struct ath10k_hw_ce_regs_addr_map *status; }; + const struct ath10k_hw_ce_regs_addr_map *status; +}; struct ath10k_hw_ce_host_ie { u32 copy_complete_reset; - struct ath10k_hw_ce_regs_addr_map *copy_complete; }; + const struct 
ath10k_hw_ce_regs_addr_map *copy_complete; +}; struct ath10k_hw_ce_host_wm_regs { u32 dstr_lmask; @@ -328,8 +331,9 @@ struct ath10k_hw_ce_dst_src_wm_regs { u32 addr; u32 low_rst; u32 high_rst; - struct ath10k_hw_ce_regs_addr_map *wm_low; - struct ath10k_hw_ce_regs_addr_map *wm_high; }; + const struct ath10k_hw_ce_regs_addr_map *wm_low; + const struct ath10k_hw_ce_regs_addr_map *wm_high; +}; struct ath10k_hw_ce_ctrl1_upd { u32 shift; @@ -355,14 +359,14 @@ struct ath10k_hw_ce_regs { u32 ce_rri_low; u32 ce_rri_high; u32 host_ie_addr; - struct ath10k_hw_ce_host_wm_regs *wm_regs; - struct ath10k_hw_ce_misc_regs *misc_regs; - struct ath10k_hw_ce_ctrl1 *ctrl1_regs; - struct ath10k_hw_ce_cmd_halt *cmd_halt; - struct ath10k_hw_ce_host_ie *host_ie; - struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; - struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; - struct ath10k_hw_ce_ctrl1_upd *upd; + const struct ath10k_hw_ce_host_wm_regs *wm_regs; + const struct ath10k_hw_ce_misc_regs *misc_regs; + const struct ath10k_hw_ce_ctrl1 *ctrl1_regs; + const struct ath10k_hw_ce_cmd_halt *cmd_halt; + const struct ath10k_hw_ce_host_ie *host_ie; + const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; + const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + const struct ath10k_hw_ce_ctrl1_upd *upd; }; struct ath10k_hw_values { diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 7ce74b4ef201..f3212eab56a1 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1844,7 +1844,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar) ret = ath10k_sdio_diag_read32(ar, addr, &val); if (ret) { ath10k_warn(ar, - "unable to read hi_acs_flags for htt tx comple : %d\n", ret); + "unable to read hi_acs_flags for htt tx complete: %d\n", ret); return ret; } diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index e66e86bdec20..9d8efec46508 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, goto err; } + /* Make sure descriptor is read after the head pointer. 
*/ + dma_rmb(); + *nbytes = ath11k_hal_ce_dst_status_get_length(desc); - if (*nbytes == 0) { - ret = -EIO; - goto err; - } *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; @@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe) dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); - if (unlikely(max_nbytes < nbytes)) { - ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + if (unlikely(max_nbytes < nbytes || nbytes == 0)) { + ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 218ab41c0f3c..ea2959305dec 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct ath11k *ar; struct hal_reo_dest_ring *desc; enum hal_reo_dest_ring_push_reason push_reason; - u32 cookie; + u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0; int i; for (i = 0; i < MAX_RADIOS; i++) @@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, try_again: ath11k_hal_srng_access_begin(ab, srng); + /* Make sure descriptor is read after the head pointer. */ + dma_rmb(); + while (likely(desc = (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, - desc->buf_addr_info.info1); + READ_ONCE(desc->buf_addr_info.info1)); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); @@ -2683,8 +2686,9 @@ try_again: num_buffs_reaped[mac_id]++; + info0 = READ_ONCE(desc->info0); push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, - desc->info0); + info0); if (unlikely(push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); @@ -2692,18 +2696,21 @@ try_again: continue; } - rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & + rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0); + rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0); + + rxcb->is_first_msdu = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & + rxcb->is_last_msdu = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & + rxcb->is_continuation = !!(rx_msdu_info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, - desc->rx_mpdu_info.meta_data); + READ_ONCE(desc->rx_mpdu_info.meta_data)); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, - desc->rx_mpdu_info.info0); + rx_mpdu_info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, - desc->info0); + info0); rxcb->mac_id = mac_id; __skb_queue_tail(&msdu_list[mac_id], msdu); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 61f4b6dd5380..8cb1505a5a0c 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf) struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; - len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); + len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags)); desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN; return len; @@ -829,7 +829,7 @@ void 
ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; } else { - srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 8191ee14a1fd..08d7b136851f 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -9972,12 +9972,17 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) struct ath11k_base *ab = ar->ab; struct ieee80211_iface_combination *combinations; struct ieee80211_iface_limit *limits; - int n_limits; + int n_limits, n_combos; bool p2p; p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); - combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); + if (ab->hw_params.support_dual_stations) + n_combos = 2; + else + n_combos = 1; + + combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL); if (!combinations) return -ENOMEM; @@ -9992,7 +9997,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) return -ENOMEM; } + limits[0].max = 1; limits[0].types |= BIT(NL80211_IFTYPE_STATION); + limits[1].max = 16; limits[1].types |= BIT(NL80211_IFTYPE_AP); if (IS_ENABLED(CONFIG_MAC80211_MESH) && ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) @@ -10002,25 +10009,24 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) combinations[0].n_limits = n_limits; combinations[0].beacon_int_infra_match = true; combinations[0].beacon_int_min_gcd = 100; + combinations[0].max_interfaces = 16; + combinations[0].num_different_channels = 1; + combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160); if (ab->hw_params.support_dual_stations) { limits[0].max = 2; - limits[1].max = 1; - - combinations[0].max_interfaces = ab->hw_params.num_vdevs; - combinations[0].num_different_channels = 2; - } else { - limits[0].max = 1; - limits[1].max = 16; - combinations[0].max_interfaces = 16; - combinations[0].num_different_channels = 1; - combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_80P80) | - BIT(NL80211_CHAN_WIDTH_160); + combinations[1].limits = limits; + combinations[1].n_limits = n_limits; + combinations[1].beacon_int_infra_match = true; + combinations[1].beacon_int_min_gcd = 100; + combinations[1].max_interfaces = ab->hw_params.num_vdevs; + combinations[1].num_different_channels = 2; } if (p2p) { @@ -10031,7 +10037,7 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) } ar->hw->wiphy->iface_combinations = combinations; - ar->hw->wiphy->n_iface_combinations = 1; + ar->hw->wiphy->n_iface_combinations = n_combos; return 0; } diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 47b9d4126d3a..2782f4723e41 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1993,6 +1993,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) chunk->prev_size == chunk->size) continue; + if (ab->qmi.mem_seg_count <= 
ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { + ath11k_dbg(ab, ATH11K_DBG_QMI, + "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n", + chunk->size, chunk->type, + chunk->prev_size, chunk->prev_type); + ab->qmi.target_mem_delayed = true; + return 0; + } + /* cannot reuse the existing chunk */ dma_free_coherent(ab->dev, chunk->prev_size, chunk->vaddr, chunk->paddr); diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c index 636dfe237a79..8d1a86e420a4 100644 --- a/drivers/net/wireless/ath/ath12k/ahb.c +++ b/drivers/net/wireless/ath/ath12k/ahb.c @@ -1125,14 +1125,13 @@ static void ath12k_ahb_remove(struct platform_device *pdev) if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath12k_ahb_power_down(ab, false); - ath12k_qmi_deinit_service(ab); goto qmi_fail; } ath12k_ahb_remove_prepare(ab); - ath12k_core_deinit(ab); - + ath12k_core_hw_group_cleanup(ab->ag); qmi_fail: + ath12k_core_deinit(ab); ath12k_ahb_free_resources(ab); } diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c index 7a5656aceebe..47821a3b060b 100644 --- a/drivers/net/wireless/ath/ath12k/ce.c +++ b/drivers/net/wireless/ath/ath12k/ce.c @@ -433,11 +433,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe, goto err; } + /* Make sure descriptor is read after the head pointer. */ + dma_rmb(); + *nbytes = ath12k_hal_ce_dst_status_get_length(desc); - if (*nbytes == 0) { - ret = -EIO; - goto err; - } *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; @@ -470,8 +469,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe) dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); - if (unlikely(max_nbytes < nbytes)) { - ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + if (unlikely(max_nbytes < nbytes || nbytes == 0)) { + ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 7eccd9cf9036..31d851d8e688 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -697,7 +697,7 @@ static void ath12k_core_stop(struct ath12k_base *ab) /* De-Init of components as needed */ } -static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data) +static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data) { struct ath12k_base *ab = data; const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC; @@ -719,6 +719,28 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data) return; } + spin_lock_bh(&ab->base_lock); + + switch (smbios->country_code_flag) { + case ATH12K_SMBIOS_CC_ISO: + ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff); + ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n", + ab->new_alpha2[0], ab->new_alpha2[1]); + break; + case ATH12K_SMBIOS_CC_WW: + ab->new_alpha2[0] = '0'; + ab->new_alpha2[1] = '0'; + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n"); + break; + default: + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n", + smbios->country_code_flag); + break; + } + + spin_unlock_bh(&ab->base_lock); + if (!smbios->bdf_enabled) { ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n"); return; @@ -758,7 +780,7 @@ static void 
ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data) int ath12k_core_check_smbios(struct ath12k_base *ab) { ab->qmi.target.bdf_ext[0] = '\0'; - dmi_walk(ath12k_core_check_bdfext, ab); + dmi_walk(ath12k_core_check_cc_code_bdfext, ab); if (ab->qmi.target.bdf_ext[0] == '\0') return -ENODATA; @@ -789,6 +811,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab) goto err_qmi_deinit; } + ath12k_debugfs_pdev_create(ab); + return 0; err_qmi_deinit: @@ -1812,9 +1836,9 @@ static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag, of_node_put(next_rx_endpoint); device_count++; - if (device_count > ATH12K_MAX_SOCS) { + if (device_count > ATH12K_MAX_DEVICES) { ath12k_warn(ab, "device count in DT %d is more than limit %d\n", - device_count, ATH12K_MAX_SOCS); + device_count, ATH12K_MAX_DEVICES); of_node_put(next_wsi_dev); return -EINVAL; } @@ -1990,7 +2014,7 @@ static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag) } } -static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag) +void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag) { struct ath12k_base *ab; int i; @@ -2136,10 +2160,9 @@ err: void ath12k_core_deinit(struct ath12k_base *ab) { - ath12k_core_panic_notifier_unregister(ab); - ath12k_core_hw_group_cleanup(ab->ag); ath12k_core_hw_group_destroy(ab->ag); ath12k_core_hw_group_unassign(ab); + ath12k_core_panic_notifier_unregister(ab); } void ath12k_core_free(struct ath12k_base *ab) diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 4b8f434e3e9a..941db6e49d6e 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -17,6 +17,7 @@ #include <linux/of_reserved_mem.h> #include <linux/panic_notifier.h> #include <linux/average.h> +#include <linux/of.h> #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -63,8 +64,8 @@ #define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) #define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ) -#define ATH12K_MAX_SOCS 3 -#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS) +#define ATH12K_MAX_DEVICES 3 +#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS) #define ATH12K_INVALID_GROUP_ID 0xFF #define ATH12K_INVALID_DEVICE_ID 0xFF @@ -175,9 +176,34 @@ struct ath12k_ext_irq_grp { struct net_device *napi_ndev; }; +enum ath12k_smbios_cc_type { + /* disable country code setting from SMBIOS */ + ATH12K_SMBIOS_CC_DISABLE = 0, + + /* set country code by ANSI country name, based on ISO3166-1 alpha2 */ + ATH12K_SMBIOS_CC_ISO = 1, + + /* worldwide regdomain */ + ATH12K_SMBIOS_CC_WW = 2, +}; + struct ath12k_smbios_bdf { struct dmi_header hdr; - u32 padding; + u8 features_disabled; + + /* enum ath12k_smbios_cc_type */ + u8 country_code_flag; + + /* To set specific country, you need to set country code + * flag=ATH12K_SMBIOS_CC_ISO first, then if country is United + * States, then country code value = 0x5553 ("US",'U' = 0x55, 'S'= + * 0x53). To set country to INDONESIA, then country code value = + * 0x4944 ("IN", 'I'=0x49, 'D'=0x44). If country code flag = + * ATH12K_SMBIOS_CC_WW, then you can use worldwide regulatory + * setting. 
+ */ + u16 cc_code; + u8 bdf_enabled; u8 bdf_ext[]; } __packed; @@ -317,6 +343,8 @@ struct ath12k_link_vif { /* only used in station mode */ bool is_sta_assoc_link; + + struct ath12k_reg_tpc_power_info reg_tpc_info; }; struct ath12k_vif { @@ -784,6 +812,8 @@ struct ath12k { u8 ftm_msgref; struct ath12k_fw_stats fw_stats; unsigned long last_tx_power_update; + + s8 max_allowed_tx_power; }; struct ath12k_hw { @@ -875,7 +905,7 @@ struct ath12k_board_data { size_t len; }; -struct ath12k_soc_dp_tx_err_stats { +struct ath12k_device_dp_tx_err_stats { /* TCL Ring Descriptor unavailable */ u32 desc_na[DP_TCL_NUM_RING_MAX]; /* Other failures during dp_tx due to mem allocation failure @@ -884,13 +914,20 @@ struct ath12k_soc_dp_tx_err_stats { atomic_t misc_fail; }; -struct ath12k_soc_dp_stats { +struct ath12k_device_dp_stats { u32 err_ring_pkts; u32 invalid_rbm; u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX]; u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; u32 hal_reo_error[DP_REO_DST_RING_MAX]; - struct ath12k_soc_dp_tx_err_stats tx_err; + struct ath12k_device_dp_tx_err_stats tx_err; + u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES]; + u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES]; + u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON]; + u32 fw_tx_status[MAX_FW_TX_STATUS]; + u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX]; + u32 tx_enqueued[DP_TCL_NUM_RING_MAX]; + u32 tx_completed[DP_TCL_NUM_RING_MAX]; }; struct ath12k_reg_freq { @@ -919,7 +956,7 @@ struct ath12k_hw_group { u8 num_probed; u8 num_started; unsigned long flags; - struct ath12k_base *ab[ATH12K_MAX_SOCS]; + struct ath12k_base *ab[ATH12K_MAX_DEVICES]; /* protects access to this struct */ struct mutex mutex; @@ -933,7 +970,7 @@ struct ath12k_hw_group { struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO]; u8 num_hw; bool mlo_capable; - struct device_node *wsi_node[ATH12K_MAX_SOCS]; + struct device_node *wsi_node[ATH12K_MAX_DEVICES]; struct ath12k_mlo_memory mlo_mem; struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO]; bool hw_link_id_init_done; @@ -1041,9 +1078,11 @@ struct ath12k_base { */ struct ieee80211_regdomain *new_regd[MAX_RADIOS]; + struct ath12k_reg_info *reg_info[MAX_RADIOS]; + /* Current DFS Regulatory */ enum ath12k_dfs_region dfs_region; - struct ath12k_soc_dp_stats soc_stats; + struct ath12k_device_dp_stats device_stats; #ifdef CONFIG_ATH12K_DEBUGFS struct dentry *debugfs_soc; #endif @@ -1072,8 +1111,6 @@ struct ath12k_base { struct ath12k_dbring_cap *db_caps; u32 num_db_cap; - struct timer_list mon_reap_timer; - struct completion htc_suspend; u64 fw_soc_drop_count; @@ -1246,6 +1283,7 @@ struct ath12k_fw_stats_pdev { }; int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); +void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag); int ath12k_core_pre_init(struct ath12k_base *ab); int ath12k_core_init(struct ath12k_base *ath12k); void ath12k_core_deinit(struct ath12k_base *ath12k); @@ -1342,8 +1380,16 @@ static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab, const char *filename, void *buf, size_t buf_len) { - snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR, - ab->hw_params->fw.dir, filename); + const char *fw_name = NULL; + + of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name); + + if (fw_name && strncmp(filename, "board", 5)) + snprintf(buf, buf_len, "%s/%s/%s/%s", ATH12K_FW_DIR, + ab->hw_params->fw.dir, fw_name, filename); + else + snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR, + ab->hw_params->fw.dir, filename); } static inline const char 
*ath12k_bus_str(enum ath12k_bus bus) diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index bab7be9c5a38..dd624d73b8b2 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -33,6 +33,76 @@ static const struct file_operations fops_simulate_radar = { .open = simple_open }; +static ssize_t ath12k_read_simulate_fw_crash(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char buf[] = + "To simulate firmware crash write one of the keywords to this file:\n" + "`assert` - send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"; + + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static ssize_t +ath12k_write_simulate_fw_crash(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k_base *ab = file->private_data; + struct ath12k_pdev *pdev; + struct ath12k *ar = NULL; + char buf[32] = {0}; + int i, ret; + ssize_t rc; + + /* filter partial writes and invalid commands */ + if (*ppos != 0 || count >= sizeof(buf) || count == 0) + return -EINVAL; + + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + if (rc < 0) + return rc; + + /* drop the possible '\n' from the end */ + if (buf[*ppos - 1] == '\n') + buf[*ppos - 1] = '\0'; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (ar) + break; + } + + if (!ar) + return -ENETDOWN; + + if (!strcmp(buf, "assert")) { + ath12k_info(ab, "simulating firmware assert crash\n"); + ret = ath12k_wmi_force_fw_hang_cmd(ar, + ATH12K_WMI_FW_HANG_ASSERT_TYPE, + ATH12K_WMI_FW_HANG_DELAY); + } else { + return -EINVAL; + } + + if (ret) { + ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret); + return ret; + } + + return count; +} + +static const struct file_operations fops_simulate_fw_crash = { + .read = ath12k_read_simulate_fw_crash, + .write = ath12k_write_simulate_fw_crash, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static ssize_t ath12k_write_tpc_stats_type(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -951,6 +1021,199 @@ void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw, &ath12k_fops_link_stats); } +static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k_base *ab = file->private_data; + struct ath12k_device_dp_stats *device_stats = &ab->device_stats; + int len = 0, i, j, ret; + struct ath12k *ar; + const int size = 4096; + static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = { + [HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR] = "Overflow", + [HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR] = "MPDU len", + [HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR] = "FCS", + [HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR] = "Decrypt", + [HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR] = "TKIP MIC", + [HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR] = "Unencrypt", + [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR] = "MSDU len", + [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR] = "MSDU limit", + [HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR] = "WiFi parse", + [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR] = "AMSDU parse", + [HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR] = "SA timeout", + [HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR] = "DA timeout", + [HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR] = "Flow timeout", + 
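The SMBIOS bdf comment added to core.h above packs the ISO 3166-1 alpha-2 country into the 16-bit cc_code field, high byte first: 'U' = 0x55 and 'S' = 0x53 give 0x5553 for the United States, and 0x4944 decodes to 'I','D', the alpha-2 code for Indonesia. ath12k_core_check_cc_code_bdfext() unpacks it the same way. A small round-trip sketch; the helper names are invented and not part of the driver:

/* Sketch only: illustrative helpers. */
#include <linux/bitfield.h>
#include <linux/types.h>

static u16 cc_code_pack(u8 hi, u8 lo)
{
	/* "US": 'U' = 0x55 in bits 15:8, 'S' = 0x53 in bits 7:0 -> 0x5553 */
	return ((u16)hi << 8) | lo;
}

static void cc_code_unpack(u16 cc_code, char alpha2[2])
{
	/* Mirrors ath12k_core_check_cc_code_bdfext() */
	alpha2[0] = u16_get_bits(cc_code >> 8, 0xff);
	alpha2[1] = u16_get_bits(cc_code, 0xff);
}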
[HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR] = "Flush req", + [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR] = "AMSDU frag", + [HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR] = "Multicast echo", + [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR] = "AMSDU mismatch", + [HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR] = "Unauth WDS", + [HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR] = "AMSDU or WDS"}; + + static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = { + [HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO] = "Desc addr zero", + [HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID] = "Desc inval", + [HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA] = "AMPDU in non BA", + [HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE] = "Non BA dup", + [HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE] = "BA dup", + [HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP] = "Frame 2k jump", + [HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP] = "BAR 2k jump", + [HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR] = "Frame OOR", + [HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR] = "BAR OOR", + [HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION] = "No BA session", + [HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN] = "Frame SN equal SSN", + [HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED] = "PN check fail", + [HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET] = "2k err", + [HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET] = "PN err", + [HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED] = "Desc blocked"}; + + static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = { + [HAL_WBM_REL_SRC_MODULE_TQM] = "TQM", + [HAL_WBM_REL_SRC_MODULE_RXDMA] = "Rxdma", + [HAL_WBM_REL_SRC_MODULE_REO] = "Reo", + [HAL_WBM_REL_SRC_MODULE_FW] = "FW", + [HAL_WBM_REL_SRC_MODULE_SW] = "SW"}; + + char *buf __free(kfree) = kzalloc(size, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + len += scnprintf(buf + len, size - len, "DEVICE RX STATS:\n\n"); + len += scnprintf(buf + len, size - len, "err ring pkts: %u\n", + device_stats->err_ring_pkts); + len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n", + device_stats->invalid_rbm); + len += scnprintf(buf + len, size - len, "RXDMA errors:\n"); + + for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++) + len += scnprintf(buf + len, size - len, "%s: %u\n", + rxdma_err[i], device_stats->rxdma_error[i]); + + len += scnprintf(buf + len, size - len, "\nREO errors:\n"); + + for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++) + len += scnprintf(buf + len, size - len, "%s: %u\n", + reo_err[i], device_stats->reo_error[i]); + + len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n"); + + for (i = 0; i < DP_REO_DST_RING_MAX; i++) + len += scnprintf(buf + len, size - len, + "ring%d: %u\n", i, + device_stats->hal_reo_error[i]); + + len += scnprintf(buf + len, size - len, "\nDEVICE TX STATS:\n"); + len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n"); + + for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + len += scnprintf(buf + len, size - len, "ring%d: %u\n", + i, device_stats->tx_err.desc_na[i]); + + len += scnprintf(buf + len, size - len, + "\nMisc Transmit Failures: %d\n", + atomic_read(&device_stats->tx_err.misc_fail)); + + len += scnprintf(buf + len, size - len, "\ntx_wbm_rel_source:"); + + for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) + len += scnprintf(buf + len, size - len, " %d:%u", + i, device_stats->tx_wbm_rel_source[i]); + + len += scnprintf(buf + len, size - len, "\n"); + + len += scnprintf(buf + len, size - len, "\ntqm_rel_reason:"); + + for (i = 0; i < MAX_TQM_RELEASE_REASON; i++) + len += scnprintf(buf + len, size - 
len, " %d:%u", + i, device_stats->tqm_rel_reason[i]); + + len += scnprintf(buf + len, size - len, "\n"); + + len += scnprintf(buf + len, size - len, "\nfw_tx_status:"); + + for (i = 0; i < MAX_FW_TX_STATUS; i++) + len += scnprintf(buf + len, size - len, " %d:%u", + i, device_stats->fw_tx_status[i]); + + len += scnprintf(buf + len, size - len, "\n"); + + len += scnprintf(buf + len, size - len, "\ntx_enqueued:"); + + for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + len += scnprintf(buf + len, size - len, " %d:%u", i, + device_stats->tx_enqueued[i]); + + len += scnprintf(buf + len, size - len, "\n"); + + len += scnprintf(buf + len, size - len, "\ntx_completed:"); + + for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + len += scnprintf(buf + len, size - len, " %d:%u", + i, device_stats->tx_completed[i]); + + len += scnprintf(buf + len, size - len, "\n"); + + for (i = 0; i < ab->num_radios; i++) { + ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i)); + if (ar) { + len += scnprintf(buf + len, size - len, + "\nradio%d tx_pending: %u\n", i, + atomic_read(&ar->dp.num_tx_pending)); + } + } + + len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n"); + + for (i = 0; i < DP_REO_DST_RING_MAX; i++) { + len += scnprintf(buf + len, size - len, "Ring%d:", i + 1); + + for (j = 0; j < ATH12K_MAX_DEVICES; j++) { + len += scnprintf(buf + len, size - len, + "\t%d:%u", j, + device_stats->reo_rx[i][j]); + } + + len += scnprintf(buf + len, size - len, "\n"); + } + + len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n"); + + for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) { + len += scnprintf(buf + len, size - len, "%s:", wbm_rel_src[i]); + + for (j = 0; j < ATH12K_MAX_DEVICES; j++) { + len += scnprintf(buf + len, + size - len, + "\t%d:%u", j, + device_stats->rx_wbm_rel_source[i][j]); + } + + len += scnprintf(buf + len, size - len, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + return ret; +} + +static const struct file_operations fops_device_dp_stats = { + .read = ath12k_debugfs_dump_device_dp_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath12k_debugfs_pdev_create(struct ath12k_base *ab) +{ + debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, + &fops_simulate_fw_crash); + + debugfs_create_file("device_dp_stats", 0400, ab->debugfs_soc, ab, + &fops_device_dp_stats); +} + void ath12k_debugfs_soc_create(struct ath12k_base *ab) { bool dput_needed; diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h index 74a255a27886..ebef7dace344 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.h +++ b/drivers/net/wireless/ath/ath12k/debugfs.h @@ -16,6 +16,7 @@ void ath12k_debugfs_fw_stats_process(struct ath12k *ar, struct ath12k_fw_stats *stats); void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void ath12k_debugfs_pdev_create(struct ath12k_base *ab); static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar) { @@ -144,6 +145,10 @@ static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { } + +static inline void ath12k_debugfs_pdev_create(struct ath12k_base *ab) +{ +} #endif /* CONFIG_ATH12K_DEBUGFS */ #endif /* _ATH12K_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index ad873013e46c..6317c6d4c043 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -168,6 +168,8 @@ static 
int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, grp_mask = &ab->hw_params->ring_mask->reo_status[0]; break; case HAL_RXDMA_MONITOR_STATUS: + grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0]; + break; case HAL_RXDMA_MONITOR_DST: grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0]; break; @@ -274,12 +276,17 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring, break; case HAL_RXDMA_BUF: case HAL_RXDMA_MONITOR_BUF: - case HAL_RXDMA_MONITOR_STATUS: params.low_threshold = num_entries >> 3; params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; params.intr_batch_cntr_thres_entries = 0; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; break; + case HAL_RXDMA_MONITOR_STATUS: + params.low_threshold = num_entries >> 3; + params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; + params.intr_batch_cntr_thres_entries = 1; + params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; + break; case HAL_TX_MONITOR_DST: params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3; params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; @@ -354,7 +361,10 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD); /* only valid if idx_lookup_override is not set in tcl_data_cmd */ - bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN); + if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) + bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN); + else + bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN); bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN, HAL_TX_BANK_CONFIG_ADDRX_EN) | @@ -919,6 +929,25 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, goto done; } + if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) { + ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id]; + for (i = 0; i < ab->num_radios; i++) { + for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) { + int id = i * ab->hw_params->num_rxdma_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_dp_mon_process_ring(ab, id, napi, budget, + 0); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + } + } + } + if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) { monitor_mode = ATH12K_DP_RX_MONITOR_MODE; ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id]; @@ -982,11 +1011,6 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab) { int i; - if (!ab->mon_reap_timer.function) - return; - - timer_delete_sync(&ab->mon_reap_timer); - for (i = 0; i < ab->num_radios; i++) ath12k_dp_rx_pdev_free(ab, i); } @@ -1024,27 +1048,6 @@ void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab) ab->hal_rx_ops->rx_desc_get_desc_size(); } -static void ath12k_dp_service_mon_ring(struct timer_list *t) -{ - struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer); - int i; - - for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) - ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET, - ATH12K_DP_RX_MONITOR_MODE); - - mod_timer(&ab->mon_reap_timer, jiffies + - msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL)); -} - -static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab) -{ - if (ab->hw_params->rxdma1_enable) - return; - - timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0); -} - int ath12k_dp_pdev_alloc(struct ath12k_base *ab) { struct ath12k *ar; @@ -1055,8 +1058,6 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab) if (ret) goto out; - ath12k_dp_mon_reap_timer_init(ab); - /* TODO: Per-pdev rx ring unlike tx ring which 
is mapped to different AC's */ for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; @@ -1107,11 +1108,8 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif) { switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_STA: - /* TODO: Verify the search type and flags since ast hash - * is not part of peer mapv3 - */ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN; - arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; + arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX; break; case WMI_VDEV_TYPE_AP: case WMI_VDEV_TYPE_IBSS: diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 706d766d8c81..a353333f83b6 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -70,6 +70,16 @@ struct ath12k_pdev_mon_stats { u32 dest_mpdu_drop; u32 dup_mon_linkdesc_cnt; u32 dup_mon_buf_cnt; + u32 dest_mon_stuck; + u32 dest_mon_not_reaped; +}; + +enum dp_mon_status_buf_state { + DP_MON_STATUS_MATCH, + DP_MON_STATUS_NO_DMA, + DP_MON_STATUS_LAG, + DP_MON_STATUS_LEAD, + DP_MON_STATUS_REPLINISH, }; struct dp_link_desc_bank { @@ -121,8 +131,11 @@ struct ath12k_mon_data { u32 mon_last_buf_cookie; u64 mon_last_linkdesc_paddr; u16 chan_noise_floor; + u32 err_bitmap; + u8 decap_format; struct ath12k_pdev_mon_stats rx_mon_stats; + enum dp_mon_status_buf_state buf_state; /* lock for monitor data */ spinlock_t mon_lock; struct sk_buff_head rx_status_q; @@ -191,6 +204,14 @@ struct ath12k_pdev_dp { #define DP_RX_BUFFER_SIZE_LITE 1024 #define DP_RX_BUFFER_ALIGN_SIZE 128 +#define RX_MON_STATUS_BASE_BUF_SIZE 2048 +#define RX_MON_STATUS_BUF_ALIGN 128 +#define RX_MON_STATUS_BUF_RESERVATION 128 +#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \ + (RX_MON_STATUS_BUF_RESERVATION + \ + RX_MON_STATUS_BUF_ALIGN + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))) + #define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0) #define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18) @@ -266,6 +287,9 @@ struct ath12k_pdev_dp { /* Invalid TX Bank ID value */ #define DP_INVALID_BANK_ID -1 +#define MAX_TQM_RELEASE_REASON 15 +#define MAX_FW_TX_STATUS 7 + struct ath12k_dp_tx_bank_profile { u8 is_configured; u32 num_users; @@ -338,6 +362,7 @@ struct ath12k_link_stats { struct ath12k_dp { struct ath12k_base *ab; + u32 mon_dest_ring_stuck_cnt; u8 num_bank_profiles; /* protects the access and update of bank_profiles */ spinlock_t tx_bank_lock; @@ -390,6 +415,7 @@ struct ath12k_dp { struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV]; struct dp_rxdma_mon_ring rxdma_mon_buf_ring; struct dp_rxdma_mon_ring tx_mon_buf_ring; + struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV]; struct ath12k_reo_q_addr_lut reoq_lut; struct ath12k_reo_q_addr_lut ml_reoq_lut; }; @@ -1330,6 +1356,8 @@ struct htt_t2h_version_conf_msg { #define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0) #define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16) #define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0) +#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0) +#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16) #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16) #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16 diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 17117315ec7b..28cadc4167f7 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1416,6 +1416,40 @@ ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_ } } 
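The RX_MON_STATUS_BUF_SIZE arithmetic above budgets a 2048-byte footprint per status buffer: 128 bytes of reservation, up to 128 more that the allocation path may burn pulling skb->data up to a 128-byte boundary, plus the trailing struct skb_shared_info that dev_alloc_skb() places after the data area. The exact result is architecture- and config-dependent; a rough sketch of the arithmetic, where the 320-byte shared-info figure is only an assumption for a common 64-bit build:

/* Back-of-the-envelope only: SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * varies with architecture and kernel config; 320 is an assumed value.
 */
#define EXAMPLE_SHINFO_RESERVED	320

/* 2048 - (128 reservation + 128 alignment slack + 320 shared info)
 *      = 1472 bytes left for hardware status TLVs per buffer.
 */
#define EXAMPLE_STATUS_BUF_SIZE	(2048 - (128 + 128 + EXAMPLE_SHINFO_RESERVED))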
+static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap) +{ + if (info & RX_MSDU_END_INFO13_FCS_ERR) + *errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + *errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + *errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; +} + +static void +ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon, + const struct hal_rx_msdu_end *msdu_end) +{ + ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2), + &pmon->err_bitmap); + pmon->decap_format = le32_get_bits(msdu_end->info1, + RX_MSDU_END_INFO11_DECAP_FORMAT); +} + static enum hal_rx_mon_status ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar, struct ath12k_mon_data *pmon, @@ -1655,6 +1689,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar, case HAL_MON_BUF_ADDR: return HAL_RX_MON_STATUS_BUF_ADDR; case HAL_RX_MSDU_END: + ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data); return HAL_RX_MON_STATUS_MSDU_END; case HAL_RX_MPDU_END: return HAL_RX_MON_STATUS_MPDU_END; @@ -1716,6 +1751,124 @@ ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar, } } +static struct sk_buff +*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + int *buf_id) +{ + struct sk_buff *skb; + dma_addr_t paddr; + + skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE); + + if (!skb) + goto fail_alloc_skb; + + if (!IS_ALIGNED((unsigned long)skb->data, + RX_MON_STATUS_BUF_ALIGN)) { + skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ab->dev, paddr))) + goto fail_free_skb; + + spin_lock_bh(&rx_ring->idr_lock); + *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max, GFP_ATOMIC); + spin_unlock_bh(&rx_ring->idr_lock); + if (*buf_id < 0) + goto fail_dma_unmap; + + ATH12K_SKB_RXCB(skb)->paddr = paddr; + return skb; + +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); +fail_free_skb: + dev_kfree_skb_any(skb); +fail_alloc_skb: + return NULL; +} + +static enum dp_mon_status_buf_state +ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng, + struct dp_rxdma_mon_ring *rx_ring) +{ + struct ath12k_skb_rxcb *rxcb; + struct hal_tlv_64_hdr *tlv; + struct sk_buff *skb; + void *status_desc; + dma_addr_t paddr; + u32 cookie; + int buf_id; + u8 rbm; + + status_desc = ath12k_hal_srng_src_next_peek(ab, srng); + if (!status_desc) + return DP_MON_STATUS_NO_DMA; + + ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); + + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) + return DP_MON_STATUS_NO_DMA; + + rxcb = ATH12K_SKB_RXCB(skb); + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_64_hdr *)skb->data; + if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE) + return DP_MON_STATUS_NO_DMA; + + return 
DP_MON_STATUS_REPLINISH; +} + +static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id) +{ + u32 ret = 0; + + if ((*ppdu_id < msdu_ppdu_id) && + ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { + /* Hold on mon dest ring, and reap mon status ring. */ + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } else if ((*ppdu_id > msdu_ppdu_id) && + ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { + /* PPDU ID has exceeded the maximum value and will + * restart from 0. + */ + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } + return ret; +} + +static +void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link, + dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm, + struct ath12k_buffer_addr **pp_buf_addr_info) +{ + struct ath12k_buffer_addr *buf_addr_info; + + buf_addr_info = &msdu_link->buf_addr_info; + + ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); + + *pp_buf_addr_info = buf_addr_info; +} + static void ath12k_dp_mon_fill_rx_rate(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info, @@ -1795,6 +1948,24 @@ ath12k_dp_mon_fill_rx_rate(struct ath12k *ar, } } +static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, + struct sk_buff *head_msdu, + struct sk_buff *tail_msdu) +{ + u32 rx_pkt_offset, l2_hdr_offset, total_offset; + + rx_pkt_offset = ar->ab->hal.hal_desc_sz; + l2_hdr_offset = + ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data); + + if (ar->ab->hw_params->rxdma1_enable) + total_offset = ATH12K_MON_RX_PKT_OFFSET; + else + total_offset = rx_pkt_offset + l2_hdr_offset; + + skb_pull(head_msdu, total_offset); +} + static struct sk_buff * ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, struct dp_mon_mpdu *mon_mpdu, @@ -1803,7 +1974,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list; - struct sk_buff *head_msdu; + struct sk_buff *head_msdu, *tail_msdu; struct hal_rx_desc *rx_desc; u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format; struct ieee80211_hdr_3addr *wh; @@ -1813,8 +1984,9 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, mpdu_buf = NULL; head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; - if (!head_msdu) + if (!head_msdu || !tail_msdu) goto err_merge_fail; ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs); @@ -1842,14 +2014,14 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { - skb_pull(head_msdu, ATH12K_MON_RX_PKT_OFFSET); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); prev_buf = head_msdu; msdu = head_msdu->next; head_frag_list = NULL; while (msdu) { - skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); if (!head_frag_list) head_frag_list = msdu; @@ -1884,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, msdu = head_msdu; while (msdu) { - skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) @@ -2223,43 +2395,30 @@ static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) return 0; } -static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap) -{ - if (info & RX_MSDU_END_INFO13_FCS_ERR) - *errmap |= HAL_RX_MPDU_ERR_FCS; - - if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) - *errmap |= HAL_RX_MPDU_ERR_DECRYPT; - - if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) - *errmap |= 
HAL_RX_MPDU_ERR_TKIP_MIC; - - if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) - *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; - - if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) - *errmap |= HAL_RX_MPDU_ERR_OVERFLOW; - - if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) - *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; - - if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) - *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; -} +/* Hardware fill buffer with 128 bytes aligned. So need to reap it + * with 128 bytes aligned. + */ +#define RXDMA_DATA_DMA_BLOCK_SIZE 128 -static int -ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon, - const struct hal_rx_msdu_end *msdu_end) +static void +ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, + bool *is_frag, u32 *total_len, + u32 *frag_len, u32 *msdu_cnt) { - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; - - ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2), - &mon_mpdu->err_bitmap); - - mon_mpdu->decap_format = le32_get_bits(msdu_end->info1, - RX_MSDU_END_INFO11_DECAP_FORMAT); + if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { + *is_frag = true; + *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE - + sizeof(struct hal_rx_desc)) & + ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1); + *total_len += *frag_len; + } else { + if (*is_frag) + *frag_len = info->msdu_len - *total_len; + else + *frag_len = info->msdu_len; - return 0; + *msdu_cnt -= 1; + } } static int @@ -2335,7 +2494,9 @@ ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar, pmon->mon_mpdu = NULL; break; case HAL_RX_MON_STATUS_MSDU_END: - return ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data); + pmon->mon_mpdu->decap_format = pmon->decap_format; + pmon->mon_mpdu->err_bitmap = pmon->err_bitmap; + break; default: break; } @@ -2370,7 +2531,7 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon, hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv); - if (ar->monitor_started && + if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable && ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value)) return HAL_RX_MON_STATUS_PPDU_DONE; @@ -2495,6 +2656,94 @@ fail_alloc_skb: return -ENOMEM; } +int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + int req_entries) +{ + enum hal_rx_buf_return_buf_manager mgr = + ab->hw_params->hal_params->rx_buf_rbm; + int num_free, num_remain, buf_id; + struct ath12k_buffer_addr *desc; + struct hal_srng *srng; + struct sk_buff *skb; + dma_addr_t paddr; + u32 cookie; + + req_entries = min(req_entries, rx_ring->bufs_max); + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + num_free = ath12k_hal_srng_src_num_free(ab, srng, true); + if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) + req_entries = num_free; + + req_entries = min(num_free, req_entries); + num_remain = req_entries; + + while (num_remain > 0) { + skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE); + if (!skb) + break; + + if (!IS_ALIGNED((unsigned long)skb->data, + RX_MON_STATUS_BUF_ALIGN)) { + skb_pull(skb, + PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (dma_mapping_error(ab->dev, paddr)) + goto fail_free_skb; + + spin_lock_bh(&rx_ring->idr_lock); + buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max * 3, GFP_ATOMIC); + spin_unlock_bh(&rx_ring->idr_lock); + if (buf_id < 0) + goto fail_dma_unmap; + 
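ath12k_dp_mon_get_buf_len() above sizes each piece of a multi-buffer MSDU: a continuation buffer contributes the largest multiple of the 128-byte DMA block that fits after the hardware RX descriptor, and the final buffer carries whatever remains of msdu_len. The sketch below reproduces just that arithmetic in user space; MON_BASE_BUF_SIZE and DMA_BLOCK_SIZE mirror the patch's constants, while HAL_RX_DESC_SZ is a made-up placeholder for sizeof(struct hal_rx_desc), which is chip-specific.

/* Illustrative sketch of the continuation-fragment length computation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MON_BASE_BUF_SIZE  2048  /* RX_MON_STATUS_BASE_BUF_SIZE in the patch */
#define DMA_BLOCK_SIZE     128   /* RXDMA_DATA_DMA_BLOCK_SIZE in the patch */
#define HAL_RX_DESC_SZ     296   /* placeholder for sizeof(struct hal_rx_desc) */

static uint32_t mon_frag_len(bool continuation, bool *is_frag,
                             uint32_t msdu_len, uint32_t *total_len,
                             uint32_t *msdu_cnt)
{
    uint32_t frag_len;

    if (continuation) {
        /* Continued MSDU: fill up to the last whole DMA block after the descriptor. */
        *is_frag = true;
        frag_len = (MON_BASE_BUF_SIZE - HAL_RX_DESC_SZ) & ~(DMA_BLOCK_SIZE - 1);
        *total_len += frag_len;
    } else {
        /* Last (or only) buffer carries the remainder of the MSDU. */
        frag_len = *is_frag ? msdu_len - *total_len : msdu_len;
        *msdu_cnt -= 1;
    }

    return frag_len;
}

int main(void)
{
    bool is_frag = false;
    uint32_t total = 0, cnt = 1;

    /* e.g. a 3000-byte MSDU split across two monitor buffers: 1664 + 1336 bytes */
    printf("first buf:  %u bytes\n", mon_frag_len(true, &is_frag, 3000, &total, &cnt));
    printf("second buf: %u bytes\n", mon_frag_len(false, &is_frag, 3000, &total, &cnt));
    return 0;
}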
cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID); + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) + goto fail_buf_unassign; + + ATH12K_SKB_RXCB(skb)->paddr = paddr; + + num_remain--; + + ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; + +fail_buf_unassign: + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); +fail_free_skb: + dev_kfree_skb_any(skb); + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; +} + static struct dp_mon_tx_ppdu_info * ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon, unsigned int ppdu_id, @@ -3641,6 +3890,487 @@ free_skb: return num_buffs_reaped; } +static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id, + int *budget, struct sk_buff_head *skb_list) +{ + const struct ath12k_hw_hal_params *hal_params; + int buf_id, srng_id, num_buffs_reaped = 0; + enum dp_mon_status_buf_state reap_status; + struct dp_rxdma_mon_ring *rx_ring; + struct ath12k_mon_data *pmon; + struct ath12k_skb_rxcb *rxcb; + struct hal_tlv_64_hdr *tlv; + void *rx_mon_status_desc; + struct hal_srng *srng; + struct ath12k_dp *dp; + struct sk_buff *skb; + struct ath12k *ar; + dma_addr_t paddr; + u32 cookie; + u8 rbm; + + ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar; + dp = &ab->dp; + pmon = &ar->dp.mon_data; + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); + rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (*budget) { + *budget -= 1; + rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng); + if (!rx_mon_status_desc) { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + break; + } + ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, + &cookie, &rbm); + if (paddr) { + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) { + ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n", + buf_id); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; + } + + rxcb = ATH12K_SKB_RXCB(skb); + + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_64_hdr *)skb->data; + if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != + HAL_RX_STATUS_BUFFER_DONE) { + pmon->buf_state = DP_MON_STATUS_NO_DMA; + ath12k_warn(ab, + "mon status DONE not set %llx, buf_id %d\n", + le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG), + buf_id); + /* RxDMA status done bit might not be set even + * though tp is moved by HW. + */ + + /* If done status is missing: + * 1. As per MAC team's suggestion, + * when HP + 1 entry is peeked and if DMA + * is not done and if HP + 2 entry's DMA done + * is set. skip HP + 1 entry and + * start processing in next interrupt. + * 2. If HP + 2 entry's DMA done is not set, + * poll onto HP + 1 entry DMA done to be set. 
+ * Check status for same buffer for next time + * dp_rx_mon_status_srng_process + */ + reap_status = ath12k_dp_rx_mon_buf_done(ab, srng, + rx_ring); + if (reap_status == DP_MON_STATUS_NO_DMA) + continue; + + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; + } + + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) { + dev_kfree_skb_any(skb); + goto move_next; + } + __skb_queue_tail(skb_list, skb); + } else { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + } +move_next: + skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring, + &buf_id); + + if (!skb) { + ath12k_warn(ab, "failed to alloc buffer for status ring\n"); + hal_params = ab->hw_params->hal_params; + ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, + hal_params->rx_buf_rbm); + num_buffs_reaped++; + break; + } + rxcb = ATH12K_SKB_RXCB(skb); + + cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) | + u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID); + + ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, + cookie, + ab->hw_params->hal_params->rx_buf_rbm); + ath12k_hal_srng_src_get_next_entry(ab, srng); + num_buffs_reaped++; + } + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return num_buffs_reaped; +} + +static u32 +ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id, + void *ring_entry, struct sk_buff **head_msdu, + struct sk_buff **tail_msdu, + struct list_head *used_list, + u32 *npackets, u32 *ppdu_id) +{ + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info; + u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0; + u32 rx_buf_size, rx_pkt_offset, sw_cookie; + bool is_frag, is_first_msdu, drop_mpdu = false; + struct hal_reo_entrance_ring *ent_desc = + (struct hal_reo_entrance_ring *)ring_entry; + u32 rx_bufs_used = 0, i = 0, desc_bank = 0; + struct hal_rx_desc *rx_desc, *tail_rx_desc; + struct hal_rx_msdu_link *msdu_link_desc; + struct sk_buff *msdu = NULL, *last = NULL; + struct ath12k_rx_desc_info *desc_info; + struct ath12k_buffer_addr buf_info; + struct hal_rx_msdu_list msdu_list; + struct ath12k_skb_rxcb *rxcb; + u16 num_msdus = 0; + dma_addr_t paddr; + u8 rbm; + + ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, + &sw_cookie, + &p_last_buf_addr_info, &rbm, + &msdu_cnt); + + spin_lock_bh(&pmon->mon_lock); + + if (le32_get_bits(ent_desc->info1, + HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) == + HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { + u8 rxdma_err = le32_get_bits(ent_desc->info1, + HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE); + if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { + drop_mpdu = true; + pmon->rx_mon_stats.dest_mpdu_drop++; + } + } + + is_frag = false; + is_first_msdu = true; + rx_pkt_offset = sizeof(struct hal_rx_desc); + + do { + if (pmon->mon_last_linkdesc_paddr == paddr) { + pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; + 
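The move_next path above rebuilds the status-ring descriptor cookie from a pdev ID plus a buffer ID using the DP_RXDMA_BUF_COOKIE_* masks from dp.h (buffer ID in bits 17:0, pdev ID in bits 19:18), and the reap and replenish code recovers the buffer ID from the same cookie. Below is a small stand-alone sketch of that encode/decode round trip, not driver code; put_field() and get_field() are local substitutes for the kernel's u32_encode_bits()/u32_get_bits().

/* Illustrative sketch of the RXDMA buffer-cookie encode/decode. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

#define COOKIE_BUF_ID   GENMASK_U32(17, 0)   /* DP_RXDMA_BUF_COOKIE_BUF_ID */
#define COOKIE_PDEV_ID  GENMASK_U32(19, 18)  /* DP_RXDMA_BUF_COOKIE_PDEV_ID */

static uint32_t put_field(uint32_t val, uint32_t mask)
{
    /* shift the value up to the mask's lowest set bit, then clamp to the field */
    return (val * (mask & -mask)) & mask;
}

static uint32_t get_field(uint32_t val, uint32_t mask)
{
    return (val & mask) / (mask & -mask);
}

int main(void)
{
    uint32_t cookie = put_field(2, COOKIE_PDEV_ID) | put_field(1234, COOKIE_BUF_ID);

    printf("cookie  = 0x%06x\n", cookie);
    printf("pdev_id = %u\n", get_field(cookie, COOKIE_PDEV_ID));
    printf("buf_id  = %u\n", get_field(cookie, COOKIE_BUF_ID));
    return 0;
}

The round trip recovers pdev_id 2 and buf_id 1234 from the packed word, which is exactly the lookup key the IDR-based buffer tracking relies on.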
spin_unlock_bh(&pmon->mon_lock); + return rx_bufs_used; + } + + desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK); + msdu_link_desc = + ar->ab->dp.link_desc_banks[desc_bank].vaddr + + (paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr); + + ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list, + &num_msdus); + desc_info = ath12k_dp_get_rx_desc(ar->ab, + msdu_list.sw_cookie[num_msdus - 1]); + tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data; + + for (i = 0; i < num_msdus; i++) { + u32 l2_hdr_offset; + + if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d last_cookie %d is same\n", + i, pmon->mon_last_buf_cookie); + drop_mpdu = true; + pmon->rx_mon_stats.dup_mon_buf_cnt++; + continue; + } + + desc_info = + ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]); + msdu = desc_info->skb; + + if (!msdu) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "msdu_pop: invalid msdu (%d/%d)\n", + i + 1, num_msdus); + goto next_msdu; + } + rxcb = ATH12K_SKB_RXCB(msdu); + if (rxcb->paddr != msdu_list.paddr[i]) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d paddr %lx != %lx\n", + i, (unsigned long)rxcb->paddr, + (unsigned long)msdu_list.paddr[i]); + drop_mpdu = true; + continue; + } + if (!rxcb->unmapped) { + dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + + skb_tailroom(msdu), + DMA_FROM_DEVICE); + rxcb->unmapped = 1; + } + if (drop_mpdu) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d drop msdu %p *ppdu_id %x\n", + i, msdu, *ppdu_id); + dev_kfree_skb_any(msdu); + msdu = NULL; + goto next_msdu; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc); + if (is_first_msdu) { + if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { + drop_mpdu = true; + dev_kfree_skb_any(msdu); + msdu = NULL; + pmon->mon_last_linkdesc_paddr = paddr; + goto next_msdu; + } + msdu_ppdu_id = + ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); + + if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id, + ppdu_id)) { + spin_unlock_bh(&pmon->mon_lock); + return rx_bufs_used; + } + pmon->mon_last_linkdesc_paddr = paddr; + is_first_msdu = false; + } + ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], + &is_frag, &total_len, + &frag_len, &msdu_cnt); + rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; + + if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) { + dev_kfree_skb_any(msdu); + goto next_msdu; + } + + if (!(*head_msdu)) + *head_msdu = msdu; + else if (last) + last->next = msdu; + + last = msdu; +next_msdu: + pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; + rx_bufs_used++; + desc_info->skb = NULL; + list_add_tail(&desc_info->list, used_list); + } + + ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm); + + ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr, + &sw_cookie, &rbm, + &p_buf_addr_info); + + ath12k_dp_rx_link_desc_return(ar->ab, &buf_info, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + + p_last_buf_addr_info = p_buf_addr_info; + + } while (paddr && msdu_cnt); + + spin_unlock_bh(&pmon->mon_lock); + + if (last) + last->next = NULL; + + *tail_msdu = msdu; + + if (msdu_cnt == 0) + *npackets = 1; + + return rx_bufs_used; +} + +/* The destination ring processing is stuck if the destination is not + * moving while status ring moves 16 PPDU. The destination ring processing + * skips this destination ring PPDU as a workaround. 
+ */ +#define MON_DEST_RING_STUCK_MAX_CNT 16 + +static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id, + u32 quota, struct napi_struct *napi) +{ + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_pdev_mon_stats *rx_mon_stats; + u32 ppdu_id, rx_bufs_used = 0, ring_id; + u32 mpdu_rx_bufs_used, npackets = 0; + struct ath12k_dp *dp = &ar->ab->dp; + struct ath12k_base *ab = ar->ab; + void *ring_entry, *mon_dst_srng; + struct dp_mon_mpdu *tmp_mpdu; + LIST_HEAD(rx_desc_used_list); + struct hal_srng *srng; + + ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; + srng = &ab->hal.srng_list[ring_id]; + + mon_dst_srng = &ab->hal.srng_list[ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, mon_dst_srng); + + ppdu_id = pmon->mon_ppdu_info.ppdu_id; + rx_mon_stats = &pmon->rx_mon_stats; + + while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { + struct sk_buff *head_msdu, *tail_msdu; + + head_msdu = NULL; + tail_msdu = NULL; + + mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, + &head_msdu, &tail_msdu, + &rx_desc_used_list, + &npackets, &ppdu_id); + + rx_bufs_used += mpdu_rx_bufs_used; + + if (mpdu_rx_bufs_used) { + dp->mon_dest_ring_stuck_cnt = 0; + } else { + dp->mon_dest_ring_stuck_cnt++; + rx_mon_stats->dest_mon_not_reaped++; + } + + if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) { + rx_mon_stats->dest_mon_stuck++; + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n", + pmon->mon_ppdu_info.ppdu_id, ppdu_id, + dp->mon_dest_ring_stuck_cnt, + rx_mon_stats->dest_mon_not_reaped, + rx_mon_stats->dest_mon_stuck); + spin_lock_bh(&pmon->mon_lock); + pmon->mon_ppdu_info.ppdu_id = ppdu_id; + spin_unlock_bh(&pmon->mon_lock); + continue; + } + + if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { + spin_lock_bh(&pmon->mon_lock); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + spin_unlock_bh(&pmon->mon_lock); + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n", + ppdu_id, pmon->mon_ppdu_info.ppdu_id, + rx_mon_stats->dest_mon_not_reaped, + rx_mon_stats->dest_mon_stuck); + break; + } + + if (head_msdu && tail_msdu) { + tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC); + if (!tmp_mpdu) + break; + + tmp_mpdu->head = head_msdu; + tmp_mpdu->tail = tail_msdu; + tmp_mpdu->err_bitmap = pmon->err_bitmap; + tmp_mpdu->decap_format = pmon->decap_format; + ath12k_dp_mon_rx_deliver(ar, tmp_mpdu, + &pmon->mon_ppdu_info, napi); + rx_mon_stats->dest_mpdu_done++; + kfree(tmp_mpdu); + } + + ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab, + mon_dst_srng); + } + ath12k_hal_srng_access_end(ar->ab, mon_dst_srng); + + spin_unlock_bh(&srng->lock); + + if (rx_bufs_used) { + rx_mon_stats->dest_ppdu_done++; + ath12k_dp_rx_bufs_replenish(ar->ab, + &dp->rx_refill_buf_ring, + &rx_desc_used_list, + rx_bufs_used); + } +} + +static int +__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id, + struct napi_struct *napi, int *budget) +{ + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + enum hal_rx_mon_status hal_status; + struct sk_buff_head skb_list; + int num_buffs_reaped; + struct sk_buff *skb; + + __skb_queue_head_init(&skb_list); + + num_buffs_reaped = 
ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, + budget, &skb_list); + if (!num_buffs_reaped) + goto exit; + + while ((skb = __skb_dequeue(&skb_list))) { + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; + + hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); + + if (ar->monitor_started && + pmon->mon_ppdu_status == DP_PPDU_STATUS_START && + hal_status == HAL_TLV_STATUS_PPDU_DONE) { + rx_mon_stats->status_ppdu_done++; + pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; + ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + } + + dev_kfree_skb_any(skb); + } + +exit: + return num_buffs_reaped; +} + int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, struct napi_struct *napi, int budget, enum dp_monitor_mode monitor_mode) @@ -3651,6 +4381,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, if (ab->hw_params->rxdma1_enable) { if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi); + } else { + if (ar->monitor_started) + num_buffs_reaped = + __ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget); } return num_buffs_reaped; diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h index 9f3adee51cb2..e25595cbdcf3 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.h +++ b/drivers/net/wireless/ath/ath12k/dp_mon.h @@ -85,6 +85,9 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, struct dp_rxdma_mon_ring *buf_ring, int req_entries); +int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + int req_entries); int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, struct napi_struct *napi, int budget, enum dp_monitor_mode monitor_mode); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 1abfbd15f13c..7e1a8f29c7b6 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -194,6 +194,22 @@ static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); } +u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc) +{ + return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); +} + +bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc) +{ + u32 tlv_tag; + + tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc); + + return tlv_tag == HAL_RX_MPDU_START; +} + static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, struct hal_rx_desc *desc) { @@ -414,9 +430,17 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab, static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; + int i; ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); + if (ab->hw_params->rxdma1_enable) + return 0; + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) + ath12k_dp_rxdma_mon_buf_ring_free(ab, + &dp->rx_mon_status_refill_ring[i]); + return 0; } @@ -430,7 +454,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, ath12k_hal_srng_get_entrysize(ab, ringtype); rx_ring->bufs_max = num_entries; - ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); + + if (ringtype == HAL_RXDMA_MONITOR_STATUS) + ath12k_dp_mon_status_bufs_replenish(ab, rx_ring, + num_entries); + else + 
ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries); return 0; } @@ -451,7 +480,8 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; - int ret; + struct dp_rxdma_mon_ring *mon_ring; + int ret, i; ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring); if (ret) { @@ -464,9 +494,19 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, &dp->rxdma_mon_buf_ring, HAL_RXDMA_MONITOR_BUF); - if (ret) { + if (ret) ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); + return ret; + } + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + mon_ring = &dp->rx_mon_status_refill_ring[i]; + ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring, + HAL_RXDMA_MONITOR_STATUS); + if (ret) { + ath12k_warn(ab, + "failed to setup HAL_RXDMA_MONITOR_STATUS\n"); return ret; } } @@ -819,15 +859,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, rx_tid->active = false; } -/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted - * to struct hal_wbm_release_ring, I couldn't figure out the logic behind - * that. - */ -static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, - struct hal_reo_dest_ring *ring, - enum hal_wbm_rel_bm_act action) +int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action) { - struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring; struct hal_wbm_release_ring *desc; struct ath12k_dp *dp = &ab->dp; struct hal_srng *srng; @@ -845,7 +880,7 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, goto exit; } - ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action); + ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action); exit: ath12k_hal_srng_access_end(ab, srng); @@ -858,14 +893,17 @@ exit: static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, bool rel_link_desc) { + struct ath12k_buffer_addr *buf_addr_info; struct ath12k_base *ab = rx_tid->ab; lockdep_assert_held(&ab->base_lock); if (rx_tid->dst_ring_desc) { - if (rel_link_desc) - ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc, + if (rel_link_desc) { + buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info; + ath12k_dp_rx_link_desc_return(ab, buf_addr_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } kfree(rx_tid->dst_ring_desc); rx_tid->dst_ring_desc = NULL; } @@ -1802,8 +1840,12 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16); ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32), peer_mac_h16, mac_addr); + ast_hash = le32_get_bits(resp->peer_map_ev.info2, + HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL); + hw_peer_id = le32_get_bits(resp->peer_map_ev.info2, + HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID); ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, - peer_id); + hw_peer_id); break; case HTT_T2H_MSG_TYPE_PEER_UNMAP: case HTT_T2H_MSG_TYPE_PEER_UNMAP2: @@ -2592,7 +2634,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) return true; - ab->soc_stats.invalid_rbm++; + ab->device_stats.invalid_rbm++; WARN_ON_ONCE(1); return false; } @@ -2755,9 +2797,9 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { struct ath12k_hw_group *ag = ab->ag; - struct list_head 
rx_desc_used_list[ATH12K_MAX_SOCS]; + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; struct ath12k_hw_link *hw_links = ag->hw_links; - int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; struct ath12k_rx_desc_info *desc_info; struct ath12k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; @@ -2774,7 +2816,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, __skb_queue_head_init(&msdu_list); - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) INIT_LIST_HEAD(&rx_desc_used_list[device_id]); srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; @@ -2835,13 +2877,14 @@ try_again: DMA_FROM_DEVICE); num_buffs_reaped[device_id]++; + ab->device_stats.reo_rx[ring_id][ab->device_id]++; push_reason = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_PUSH_REASON); if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { dev_kfree_skb_any(msdu); - ab->soc_stats.hal_reo_error[ring_id]++; + ab->device_stats.hal_reo_error[ring_id]++; continue; } @@ -2891,7 +2934,7 @@ try_again: if (!total_msdu_reaped) goto exit; - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { if (!num_buffs_reaped[device_id]) continue; @@ -3483,7 +3526,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar, goto out_unlock; } } else { - ath12k_dp_rx_link_desc_return(ab, ring_desc, + ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } @@ -3596,7 +3639,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) { dev_kfree_skb_any(msdu); - ath12k_dp_rx_link_desc_return(ar->ab, desc, + ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } exit: @@ -3608,9 +3651,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { struct ath12k_hw_group *ag = ab->ag; - struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; - int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; struct dp_link_desc_bank *link_desc_banks; enum hal_rx_buf_return_buf_manager rbm; struct hal_rx_msdu_link *link_desc_va; @@ -3632,7 +3675,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, tot_n_bufs_reaped = 0; quota = budget; - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) INIT_LIST_HEAD(&rx_desc_used_list[device_id]); reo_except = &ab->dp.reo_except_ring; @@ -3646,7 +3689,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, while (budget && (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { drop = false; - ab->soc_stats.err_ring_pkts++; + ab->device_stats.err_ring_pkts++; ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, &desc_bank); @@ -3673,9 +3716,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, if (rbm != partner_ab->dp.idle_link_rbm && rbm != HAL_RX_BUF_RBM_SW3_BM && rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { - ab->soc_stats.invalid_rbm++; + ab->device_stats.invalid_rbm++; ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); - 
ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, + ath12k_dp_rx_link_desc_return(partner_ab, + &reo_desc->buf_addr_info, HAL_WBM_REL_BM_ACT_REL_MSDU); continue; } @@ -3693,7 +3737,8 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, drop = true; /* Return the link desc back to wbm idle list */ - ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, + ath12k_dp_rx_link_desc_return(partner_ab, + &reo_desc->buf_addr_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } @@ -3720,7 +3765,7 @@ exit: spin_unlock_bh(&srng->lock); - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { if (!num_buffs_reaped[device_id]) continue; @@ -3836,7 +3881,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); bool drop = false; - ar->ab->soc_stats.reo_error[rxcb->err_code]++; + ar->ab->device_stats.reo_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: @@ -3909,7 +3954,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, bool drop = false; u32 err_bitmap; - ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; + ar->ab->device_stats.rxdma_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: @@ -3968,7 +4013,7 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar, int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { - struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; struct ath12k_hw_group *ag = ab->ag; struct ath12k *ar; struct ath12k_dp *dp = &ab->dp; @@ -3979,9 +4024,10 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, struct sk_buff_head msdu_list, scatter_msdu_list; struct ath12k_skb_rxcb *rxcb; void *rx_desc; - int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; int total_num_buffs_reaped = 0; struct ath12k_rx_desc_info *desc_info; + struct ath12k_device_dp_stats *device_stats = &ab->device_stats; struct ath12k_hw_link *hw_links = ag->hw_links; struct ath12k_base *partner_ab; u8 hw_link_id, device_id; @@ -3991,7 +4037,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, __skb_queue_head_init(&msdu_list); __skb_queue_head_init(&scatter_msdu_list); - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) INIT_LIST_HEAD(&rx_desc_used_list[device_id]); srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; @@ -4115,7 +4161,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, if (!total_num_buffs_reaped) goto done; - for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { if (!num_buffs_reaped[device_id]) continue; @@ -4155,6 +4201,12 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, dev_kfree_skb_any(msdu); continue; } + + if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) { + device_id = ar->ab->device_id; + device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++; + } + ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list); } rcu_read_unlock(); @@ -4244,6 +4296,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab) void ath12k_dp_rx_free(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; + struct dp_srng *srng; int i; ath12k_dp_srng_cleanup(ab, 
&dp->rx_refill_buf_ring.refill_buf_ring); @@ -4251,6 +4304,10 @@ void ath12k_dp_rx_free(struct ath12k_base *ab) for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { if (ab->hw_params->rx_mac_buf_ring) ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); + if (!ab->hw_params->rxdma1_enable) { + srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; + ath12k_dp_srng_cleanup(ab, srng); + } } for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) @@ -4399,6 +4456,19 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) ret); return ret; } + } else { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = + dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; + ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, + HAL_RXDMA_MONITOR_STATUS); + if (ret) { + ath12k_warn(ab, + "failed to configure mon_status_refill_ring%d %d\n", + i, ret); + return ret; + } + } } ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); @@ -4413,6 +4483,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) int ath12k_dp_rx_alloc(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; + struct dp_srng *srng; int i, ret; idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); @@ -4460,6 +4531,23 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } + } else { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr); + spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock); + } + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; + ret = ath12k_dp_srng_setup(ab, srng, + HAL_RXDMA_MONITOR_STATUS, 0, i, + DP_RXDMA_MON_STATUS_RING_SIZE); + if (ret) { + ath12k_warn(ab, "failed to setup mon status ring %d\n", + i); + return ret; + } + } } ret = ath12k_dp_rxdma_buf_setup(ab); @@ -4530,17 +4618,15 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) return ret; } - /* if rxdma1_enable is false, no need to setup - * rxdma_mon_desc_ring. 
- */ + pmon->mon_last_linkdesc_paddr = 0; + pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; + spin_lock_init(&pmon->mon_lock); + if (!ar->ab->hw_params->rxdma1_enable) return 0; - pmon->mon_last_linkdesc_paddr = 0; - pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list); pmon->mon_mpdu = NULL; - spin_lock_init(&pmon->mon_lock); return 0; } diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index f5c05f17813b..e971a314bd2d 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -165,5 +165,13 @@ void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_d struct ath12k_dp_rx_info *rx_info); int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype); - +u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc); +bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc); +int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action); +bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc); #endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 2136eeb278af..b6816b6c2c04 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -350,7 +350,7 @@ tcl_ring_sel: default: /* TODO: Take care of other encap modes as well */ ret = -EINVAL; - atomic_inc(&ab->soc_stats.tx_err.misc_fail); + atomic_inc(&ab->device_stats.tx_err.misc_fail); goto fail_remove_tx_buf; } @@ -373,7 +373,7 @@ tcl_ring_sel: map: ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, ti.paddr)) { - atomic_inc(&ab->soc_stats.tx_err.misc_fail); + atomic_inc(&ab->device_stats.tx_err.misc_fail); ath12k_warn(ab, "failed to DMA map data Tx buffer\n"); ret = -ENOMEM; goto fail_remove_tx_buf; @@ -448,7 +448,7 @@ map: * desc because the desc is directly enqueued onto hw queue. 
*/ ath12k_hal_srng_access_end(ab, tcl_ring); - ab->soc_stats.tx_err.desc_na[ti.ring_id]++; + ab->device_stats.tx_err.desc_na[ti.ring_id]++; spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; @@ -477,6 +477,8 @@ map: arvif->link_stats.tx_enqueued++; spin_unlock_bh(&arvif->link_stats_lock); + ab->device_stats.tx_enqueued[ti.ring_id]++; + ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti); ath12k_hal_srng_access_end(ab, tcl_ring); @@ -557,6 +559,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, info = IEEE80211_SKB_CB(msdu); ar = skb_cb->ar; + ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); @@ -614,6 +617,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc, wbm_status = le32_get_bits(status_desc->info0, HTT_TX_WBM_COMP_INFO0_STATUS); + ab->device_stats.fw_tx_status[wbm_status]++; switch (wbm_status) { case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: @@ -760,7 +764,8 @@ static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, struct ath12k_tx_desc_params *desc_params, - struct hal_tx_status *ts) + struct hal_tx_status *ts, + int ring) { struct ath12k_base *ab = ar->ab; struct ath12k_hw *ah = ar->ah; @@ -777,6 +782,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, } skb_cb = ATH12K_SKB_CB(msdu); + ab->device_stats.tx_completed[ring]++; dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (skb_cb->paddr_ext_desc) { @@ -907,6 +913,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) struct hal_wbm_release_ring *desc; u8 pdev_id; u64 desc_va; + enum hal_wbm_rel_src_module buf_rel_source; + enum hal_wbm_tqm_rel_reason rel_status; spin_lock_bh(&status_ring->lock); @@ -963,6 +971,15 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) desc_params.skb = tx_desc->skb; desc_params.skb_ext_desc = tx_desc->skb_ext_desc; + /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */ + buf_rel_source = le32_get_bits(tx_status->info0, + HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE); + ab->device_stats.tx_wbm_rel_source[buf_rel_source]++; + + rel_status = le32_get_bits(tx_status->info0, + HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON); + ab->device_stats.tqm_rel_reason[rel_status]++; + /* Release descriptor as soon as extracting necessary info * to reduce contention */ @@ -979,7 +996,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); - ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts); + ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts, + tx_ring->tcl_data_ring_id); } } @@ -1511,6 +1529,44 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset) return ret; } } + return 0; + } + + if (!reset) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = ab->dp.rx_mac_buf_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, + i, + HAL_RXDMA_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for mon rx buf %d\n", + ret); + return ret; + } + } + } + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; + if (!reset) { + tlv_filter.rx_filter = + HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING; + } + + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, + i, 
+ HAL_RXDMA_MONITOR_STATUS, + RX_MON_STATUS_BUF_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for mon status buf %d\n", + ret); + return ret; + } } return 0; diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index 12d0f991a47f..a301898e5849 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -154,7 +154,14 @@ static const struct hal_srng_config hw_srng_config_template[] = { .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, }, - [HAL_RXDMA_MONITOR_STATUS] = { 0, }, + [HAL_RXDMA_MONITOR_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, [HAL_RXDMA_MONITOR_DESC] = { 0, }, [HAL_RXDMA_DIR_BUF] = { .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, @@ -1943,7 +1950,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc { u32 len; - len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN); + len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN); desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN); return len; @@ -2035,6 +2042,24 @@ int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng, return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; } +void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab, + struct hal_srng *srng) +{ + void *desc; + u32 next_hp; + + lockdep_assert_held(&srng->lock); + + next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; + + if (next_hp == srng->u.src_ring.cached_tp) + return NULL; + + desc = srng->ring_base_vaddr + next_hp; + + return desc; +} + void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab, struct hal_srng *srng) { @@ -2068,6 +2093,17 @@ void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab, return desc; } +void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) == + srng->u.src_ring.cached_tp) + return NULL; + + return srng->ring_base_vaddr + srng->u.src_ring.hp; +} + void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab, struct hal_srng *srng) { @@ -2113,7 +2149,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng) srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; else - srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); } /* Update cached ring head/tail pointers to HW. 
ath12k_hal_srng_access_begin() diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h index 4adef9240409..0ee9c6b26dab 100644 --- a/drivers/net/wireless/ath/ath12k/hal.h +++ b/drivers/net/wireless/ath/ath12k/hal.h @@ -498,6 +498,7 @@ enum hal_srng_ring_id { HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START, + HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, @@ -1143,6 +1144,7 @@ void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng, struct hal_srng_params *params); void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab, struct hal_srng *srng); +void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng); void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng); int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng, bool sync_hw_ptr); @@ -1150,6 +1152,8 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab, struct hal_srng *srng); void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab, struct hal_srng *srng); +void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab, + struct hal_srng *srng); void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab, struct hal_srng *srng); int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng, diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 49eededbfa9d..0173f731bfef 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -707,7 +707,7 @@ enum hal_rx_msdu_desc_reo_dest_ind { #define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29) #define HAL_RX_MSDU_PKT_LENGTH_GET(val) \ - (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH)) + (le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH)) struct rx_msdu_desc { __le32 info0; @@ -1008,6 +1008,10 @@ enum hal_reo_entr_rxdma_ecode { HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR, HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR, HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR, + HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR, + HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR, + HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR, + HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR, HAL_REO_ENTR_RING_RXDMA_ECODE_MAX, }; @@ -1809,6 +1813,7 @@ enum hal_wbm_rel_src_module { HAL_WBM_REL_SRC_MODULE_REO, HAL_WBM_REL_SRC_MODULE_FW, HAL_WBM_REL_SRC_MODULE_SW, + HAL_WBM_REL_SRC_MODULE_MAX, }; enum hal_wbm_rel_desc_type { diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c index 98eeccc68fcd..48aa48c48606 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.c +++ b/drivers/net/wireless/ath/ath12k/hal_rx.c @@ -326,7 +326,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, HAL_REO_DEST_RING_INFO0_PUSH_REASON); err_code = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_ERROR_CODE); - ab->soc_stats.reo_error[err_code]++; + ab->device_stats.reo_error[err_code]++; if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED && push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { @@ -381,7 +381,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, val = le32_get_bits(wbm_desc->buf_addr_info.info1, BUFFER_ADDR_INFO1_RET_BUF_MGR); if (val != HAL_RX_BUF_RBM_SW3_BM) { - ab->soc_stats.invalid_rbm++; + 
ab->device_stats.invalid_rbm++; return -EINVAL; } @@ -393,7 +393,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, val = le32_get_bits(wbm_cc_desc->info0, HAL_WBM_RELEASE_RX_CC_INFO0_RBM); if (val != HAL_RX_BUF_RBM_SW3_BM) { - ab->soc_stats.invalid_rbm++; + ab->device_stats.invalid_rbm++; return -EINVAL; } @@ -446,17 +446,97 @@ void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab, *cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE); } +void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, + u32 *sw_cookie, + struct ath12k_buffer_addr **pp_buf_addr, + u8 *rbm, u32 *msdu_cnt) +{ + struct hal_reo_entrance_ring *reo_ent_ring = + (struct hal_reo_entrance_ring *)rx_desc; + struct ath12k_buffer_addr *buf_addr_info; + struct rx_mpdu_desc *rx_mpdu_desc_info_details; + + rx_mpdu_desc_info_details = + (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info; + + *msdu_cnt = le32_get_bits(rx_mpdu_desc_info_details->info0, + RX_MPDU_DESC_INFO0_MSDU_COUNT); + + buf_addr_info = (struct ath12k_buffer_addr *)&reo_ent_ring->buf_addr_info; + + *paddr = (((u64)le32_get_bits(buf_addr_info->info1, + BUFFER_ADDR_INFO1_ADDR)) << 32) | + le32_get_bits(buf_addr_info->info0, + BUFFER_ADDR_INFO0_ADDR); + + *sw_cookie = le32_get_bits(buf_addr_info->info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + *rbm = le32_get_bits(buf_addr_info->info1, + BUFFER_ADDR_INFO1_RET_BUF_MGR); + + *pp_buf_addr = (void *)buf_addr_info; +} + +void ath12k_hal_rx_msdu_list_get(struct ath12k *ar, + struct hal_rx_msdu_link *link_desc, + struct hal_rx_msdu_list *msdu_list, + u16 *num_msdus) +{ + struct hal_rx_msdu_details *msdu_details = NULL; + struct rx_msdu_desc *msdu_desc_info = NULL; + u32 last = 0, first = 0; + u8 tmp = 0; + int i; + + last = u32_encode_bits(last, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); + first = u32_encode_bits(first, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); + msdu_details = &link_desc->msdu_link[0]; + + for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { + if (!i && le32_get_bits(msdu_details[i].buf_addr_info.info0, + BUFFER_ADDR_INFO0_ADDR) == 0) + break; + if (le32_get_bits(msdu_details[i].buf_addr_info.info0, + BUFFER_ADDR_INFO0_ADDR) == 0) { + msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; + msdu_desc_info->info0 |= cpu_to_le32(last); + break; + } + msdu_desc_info = &msdu_details[i].rx_msdu_info; + + if (!i) + msdu_desc_info->info0 |= cpu_to_le32(first); + else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) + msdu_desc_info->info0 |= cpu_to_le32(last); + msdu_list->msdu_info[i].msdu_flags = le32_to_cpu(msdu_desc_info->info0); + msdu_list->msdu_info[i].msdu_len = + HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); + msdu_list->sw_cookie[i] = + le32_get_bits(msdu_details[i].buf_addr_info.info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + tmp = le32_get_bits(msdu_details[i].buf_addr_info.info1, + BUFFER_ADDR_INFO1_RET_BUF_MGR); + msdu_list->paddr[i] = + ((u64)(le32_get_bits(msdu_details[i].buf_addr_info.info1, + BUFFER_ADDR_INFO1_ADDR)) << 32) | + le32_get_bits(msdu_details[i].buf_addr_info.info0, + BUFFER_ADDR_INFO0_ADDR); + msdu_list->rbm[i] = tmp; + } + *num_msdus = i; +} + void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, - struct hal_wbm_release_ring *dst_desc, - struct hal_wbm_release_ring *src_desc, + struct hal_wbm_release_ring *desc, + struct ath12k_buffer_addr *buf_addr_info, enum hal_wbm_rel_bm_act action) { - dst_desc->buf_addr_info = src_desc->buf_addr_info; - dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW, - HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) 
| - le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) | - le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK, - HAL_WBM_RELEASE_INFO0_DESC_TYPE); + desc->buf_addr_info = *buf_addr_info; + desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW, + HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) | + le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) | + le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK, + HAL_WBM_RELEASE_INFO0_DESC_TYPE); } void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv, diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h index c753eb2a03ad..a3ab588aae19 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.h +++ b/drivers/net/wireless/ath/ath12k/hal_rx.h @@ -538,6 +538,7 @@ struct hal_rx_msdu_desc_info { #define HAL_RX_NUM_MSDU_DESC 6 struct hal_rx_msdu_list { struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC]; + u64 paddr[HAL_RX_NUM_MSDU_DESC]; u32 sw_cookie[HAL_RX_NUM_MSDU_DESC]; u8 rbm[HAL_RX_NUM_MSDU_DESC]; }; @@ -1141,8 +1142,8 @@ void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_ms u32 *msdu_cookies, enum hal_rx_buf_return_buf_manager *rbm); void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, - struct hal_wbm_release_ring *dst_desc, - struct hal_wbm_release_ring *src_desc, + struct hal_wbm_release_ring *desc, + struct ath12k_buffer_addr *buf_addr_info, enum hal_wbm_rel_bm_act action); void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo, dma_addr_t paddr, u32 cookie, u8 manager); @@ -1157,5 +1158,12 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab, struct ath12k_buffer_addr *buff_addr, dma_addr_t *paddr, u32 *cookie); +void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, + struct ath12k_buffer_addr **pp_buf_addr, + u8 *rbm, u32 *msdu_cnt); +void ath12k_hal_rx_msdu_list_get(struct ath12k *ar, + struct hal_rx_msdu_link *link_desc, + struct hal_rx_msdu_list *msdu_list, + u16 *num_msdus); #endif diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index a46d82857c5d..7e2cf0fb2085 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -118,6 +118,10 @@ static const struct ath12k_hw_ops wcn7850_ops = { #define ATH12K_TX_MON_RING_MASK_0 0x1 #define ATH12K_TX_MON_RING_MASK_1 0x2 +#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1 +#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2 +#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4 + /* Target firmware's Copy Engine configuration. 
*/ static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = { /* CE0: host->target HTC control and raw streams */ @@ -836,6 +840,12 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = { }, .rx_mon_dest = { }, + .rx_mon_status = { + 0, 0, 0, 0, + ATH12K_RX_MON_STATUS_RING_MASK_0, + ATH12K_RX_MON_STATUS_RING_MASK_1, + ATH12K_RX_MON_STATUS_RING_MASK_2, + }, .rx = { 0, 0, 0, ATH12K_RX_RING_MASK_0, @@ -1370,7 +1380,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), - .supports_monitor = false, + .supports_monitor = true, .idle_ps = true, .download_calib = false, diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index 024cfcd2cc15..0fbc17649df4 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -135,6 +135,7 @@ enum hal_encrypt_type; struct ath12k_hw_ring_mask { u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX]; u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX]; + u8 rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX]; u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX]; u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX]; u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX]; diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 4dae941c9615..88b59f3ff87a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -229,7 +229,8 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = { const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = { .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START | HTT_RX_FILTER_TLV_FLAGS_PPDU_END | - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE, + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | + HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO, .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0, .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1, .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2, @@ -580,22 +581,16 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id, return 0; } -static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif) +static struct ath12k_link_vif * +ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif, + struct ieee80211_bss_conf *link_conf) { - struct ieee80211_bss_conf *link_conf, *tx_bss_conf; + struct ieee80211_bss_conf *tx_bss_conf; struct ath12k *ar = arvif->ar; struct ath12k_vif *tx_ahvif; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - link_conf = ath12k_mac_get_link_bss_conf(arvif); - if (!link_conf) { - ath12k_warn(ar->ab, - "unable to access bss link conf for link %u required to retrieve transmitting link conf\n", - arvif->link_id); - return NULL; - } - tx_bss_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, link_conf->tx_bss_conf); if (tx_bss_conf) { @@ -1623,7 +1618,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) return -ENOLINK; } - tx_arvif = ath12k_mac_get_tx_arvif(arvif); + tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf); if (tx_arvif) { if (tx_arvif != arvif && arvif->is_up) return 0; @@ -1693,6 +1688,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, { struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_bss_conf *link_conf; struct ath12k_link_vif *tx_arvif; struct ath12k *ar = arvif->ar; int ret; @@ -1725,7 +1721,15 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, params.aid = ahvif->aid; 
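The hw.c hunk above gives wcn7850 a rx_mon_status ring-mask table in which external interrupt groups 4, 5 and 6 each carry one bit (0x1, 0x2, 0x4), i.e. one monitor status ring per MAC. The sketch below is only a simplified model of how such per-group masks are commonly walked when an interrupt group is serviced; the group count, MAC count and handler are assumptions for illustration, not the driver's actual dispatch code.

/* Illustrative sketch: dispatching per-MAC work from a per-group ring mask. */
#include <stdint.h>
#include <stdio.h>

#define NUM_IRQ_GROUPS  11   /* placeholder for ATH12K_EXT_IRQ_GRP_NUM_MAX */
#define NUM_MACS        3

/* Groups 4..6 each service one monitor status ring, as in the wcn7850 table. */
static const uint8_t rx_mon_status_mask[NUM_IRQ_GROUPS] = {
    0, 0, 0, 0, 0x1, 0x2, 0x4,
};

static void service_group(int grp_id)
{
    uint8_t mask = rx_mon_status_mask[grp_id];
    int mac_id;

    for (mac_id = 0; mac_id < NUM_MACS; mac_id++) {
        if (mask & (1u << mac_id))
            printf("group %d -> process mon status ring of mac %d\n",
                   grp_id, mac_id);
    }
}

int main(void)
{
    for (int grp = 0; grp < NUM_IRQ_GROUPS; grp++)
        service_group(grp);
    return 0;
}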
params.bssid = arvif->bssid; - tx_arvif = ath12k_mac_get_tx_arvif(arvif); + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, + "unable to access bss link conf for link %u required to retrieve transmitting link conf\n", + arvif->link_id); + return; + } + + tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf); if (tx_arvif) { params.tx_bssid = tx_arvif->bssid; params.nontx_profile_idx = info->bssid_index; @@ -2935,6 +2939,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, const struct ieee80211_sta_eht_cap *eht_cap; const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_link_sta *link_sta; + struct ieee80211_bss_conf *link_conf; u32 *rx_mcs, *tx_mcs; lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); @@ -2946,6 +2951,12 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, return; } + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access link_conf in peer assoc eht set\n"); + return; + } + eht_cap = &link_sta->eht_cap; he_cap = &link_sta->he_cap; if (!he_cap->has_he || !eht_cap->has_eht) @@ -3017,6 +3028,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, } arg->punct_bitmap = ~arvif->punct_bitmap; + arg->eht_disable_mcs15 = link_conf->eht_disable_mcs15; } static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta, @@ -3047,6 +3059,7 @@ static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta, ml->ml_peer_id = ahsta->ml_peer_id; ml->ieee_link_id = arsta->link_id; ml->num_partner_links = 0; + ml->eml_cap = sta->eml_cap; links = ahsta->links_map; rcu_read_lock(); @@ -3480,25 +3493,18 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, if (arvif) return arvif; - if (!vif->valid_links) { - /* Use deflink for Non-ML VIFs and mark the link id as 0 - */ - link_id = 0; + /* If this is the first link arvif being created for an ML VIF + * use the preallocated deflink memory except for scan arvifs + */ + if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { arvif = &ahvif->deflink; + + if (vif->type == NL80211_IFTYPE_STATION) + arvif->is_sta_assoc_link = true; } else { - /* If this is the first link arvif being created for an ML VIF - * use the preallocated deflink memory except for scan arvifs - */ - if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { - arvif = &ahvif->deflink; - if (vif->type == NL80211_IFTYPE_STATION) - arvif->is_sta_assoc_link = true; - } else { - arvif = (struct ath12k_link_vif *) - kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); - if (!arvif) - return NULL; - } + arvif = kzalloc(sizeof(*arvif), GFP_KERNEL); + if (!arvif) + return NULL; } ath12k_mac_init_arvif(ahvif, arvif, link_id); @@ -3746,6 +3752,18 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif) psmode, arvif->vdev_id, ret); } +static bool ath12k_mac_supports_station_tpc(struct ath12k *ar, + struct ath12k_vif *ahvif, + const struct cfg80211_chan_def *chandef) +{ + return ath12k_wmi_supports_6ghz_cc_ext(ar) && + test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) && + ahvif->vdev_type == WMI_VDEV_TYPE_STA && + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE && + chandef->chan && + chandef->chan->band == NL80211_BAND_6GHZ; +} + static void ath12k_mac_bss_info_changed(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info, @@ -4647,6 +4665,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, spin_lock_bh(&ar->data_lock); ar->scan.state = ATH12K_SCAN_IDLE; 
spin_unlock_bh(&ar->data_lock); + goto exit; } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started"); @@ -5779,12 +5798,15 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_bss_conf *link_conf; struct ath12k *ar = arvif->ar; + struct ath12k_reg_info *reg_info; + struct ath12k_base *ab = ar->ab; int ret = 0; lockdep_assert_wiphy(hw->wiphy); - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n", + ath12k_dbg(ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n", arsta->link_id, arsta->addr, old_state, new_state); /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station @@ -5794,7 +5816,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NOTEXIST)) { ret = ath12k_mac_station_remove(ar, arvif, arsta); if (ret) { - ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", + ath12k_warn(ab, "Failed to remove station: %pM for VDEV: %d\n", arsta->addr, arvif->vdev_id); goto exit; } @@ -5805,7 +5827,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NONE) { ret = ath12k_mac_station_add(ar, arvif, arsta); if (ret) - ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", + ath12k_warn(ab, "Failed to add station: %pM for VDEV: %d\n", arsta->addr, arvif->vdev_id); /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for @@ -5818,7 +5840,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath12k_mac_station_assoc(ar, arvif, arsta, false); if (ret) - ath12k_warn(ar->ab, "Failed to associate station: %pM\n", + ath12k_warn(ab, "Failed to associate station: %pM\n", arsta->addr); /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as @@ -5827,9 +5849,21 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { ret = ath12k_mac_station_authorize(ar, arvif, arsta); - if (ret) - ath12k_warn(ar->ab, "Failed to authorize station: %pM\n", + if (ret) { + ath12k_warn(ab, "Failed to authorize station: %pM\n", arsta->addr); + goto exit; + } + + if (ath12k_wmi_supports_6ghz_cc_ext(ar) && + arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA) { + link_conf = ath12k_mac_get_link_bss_conf(arvif); + reg_info = ab->reg_info[ar->pdev_idx]; + ath12k_dbg(ab, ATH12K_DBG_MAC, "connection done, update reg rules\n"); + ath12k_hw_to_ah(hw)->regd_updated = false; + ath12k_reg_handle_chan_list(ab, reg_info, arvif->ahvif->vdev_type, + link_conf->power_type); + } /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal, * deauthorize it. 
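The extra work on the ASSOC -> AUTHORIZED transition exists because only after association is the connected AP's 6 GHz regulatory power type (LPI/SP/VLP) known from the link's BSS configuration; at that point ah->regd_updated is cleared and the rules are rebuilt for that power type. A compressed, hedged sketch of the decision, with simplified enums standing in for the driver's types (not the actual driver code):

/* Sketch only: names are stand-ins for IEEE80211_REG_*/WMI vdev types */
enum reg_power_type { LPI_AP, SP_AP, VLP_AP, UNSET_AP };

struct sta_authorize_ctx {
	int supports_6ghz_cc_ext;          /* reg CC ext service and 6 GHz radio */
	int is_sta_vdev;                   /* vdev_type == STA */
	enum reg_power_type ap_power_type; /* from link_conf->power_type */
};

/* Returns the power type the regdomain should be rebuilt for, or UNSET_AP
 * when no regulatory refresh is needed on authorize.
 */
static enum reg_power_type regd_refresh_power_type(const struct sta_authorize_ctx *c)
{
	if (!c->supports_6ghz_cc_ext || !c->is_sta_vdev)
		return UNSET_AP;
	return c->ap_power_type;
}

int main(void) { struct sta_authorize_ctx c = { 1, 1, SP_AP }; return regd_refresh_power_type(&c); }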
@@ -5848,7 +5882,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath12k_mac_station_disassoc(ar, arvif, arsta); if (ret) - ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", + ath12k_warn(ab, "Failed to disassociate station: %pM\n", arsta->addr); } @@ -7466,6 +7500,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, struct ath12k_peer *peer; unsigned long links_map; bool is_mcast = false; + bool is_dvlan = false; struct ethhdr *eth; bool is_prb_rsp; u16 mcbc_gsn; @@ -7531,7 +7566,14 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_AP && vif->p2p) ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp); - if (!vif->valid_links || !is_mcast || + /* Checking if it is a DVLAN frame */ + if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) && + !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) && + !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) && + ieee80211_has_protected(hdr->frame_control)) + is_dvlan = true; + + if (!vif->valid_links || !is_mcast || is_dvlan || test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) { ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast); if (unlikely(ret)) { @@ -7990,7 +8032,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif, return -ENOLINK; } - tx_arvif = ath12k_mac_get_tx_arvif(arvif); + tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf); if (!tx_arvif) return 0; @@ -8319,50 +8361,9 @@ void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab) } } -int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) +static void ath12k_mac_determine_vdev_type(struct ieee80211_vif *vif, + struct ath12k_vif *ahvif) { - struct ath12k_hw *ah = ar->ah; - struct ath12k_base *ab = ar->ab; - struct ieee80211_hw *hw = ah->hw; - struct ath12k_vif *ahvif = arvif->ahvif; - struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); - struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; - struct ath12k_wmi_peer_create_arg peer_param = {0}; - struct ieee80211_bss_conf *link_conf; - u32 param_id, param_value; - u16 nss; - int i; - int ret, vdev_id; - u8 link_id; - - lockdep_assert_wiphy(hw->wiphy); - - /* In NO_VIRTUAL_MONITOR, its necessary to restrict only one monitor - * interface in each radio - */ - if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created) - return -EINVAL; - - /* If no link is active and scan vdev is requested - * use a default link conf for scan address purpose. 
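The transmit-path change above also lets dynamic-VLAN (DVLAN) frames skip per-link duplication for multicast on MLO interfaces: a frame is treated as DVLAN when hardware crypto is in use, the skb carries neither the 802.11 encap nor a driver-set cipher flag, yet the frame control already has the protected bit set. A self-contained restatement of that predicate; the flag constants are illustrative stand-ins for the driver's skb_cb flags, and frame_control is assumed host-endian here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKB_HW_80211_ENCAP  0x1    /* stand-in for ATH12K_SKB_HW_80211_ENCAP */
#define SKB_CIPHER_SET      0x2    /* stand-in for ATH12K_SKB_CIPHER_SET */
#define FCTL_PROTECTED      0x4000 /* IEEE80211_FCTL_PROTECTED */

/* Mirrors the DVLAN test added in ath12k_mac_op_tx(): a frame that arrives
 * already protected, without the driver having set a cipher, is sent on the
 * normal single path instead of being duplicated per link.
 */
static bool is_dvlan_frame(bool hw_crypto_disabled, uint32_t skb_flags,
			   uint16_t frame_control)
{
	return !hw_crypto_disabled &&
	       !(skb_flags & SKB_HW_80211_ENCAP) &&
	       !(skb_flags & SKB_CIPHER_SET) &&
	       (frame_control & FCTL_PROTECTED);
}

int main(void)
{
	/* group-addressed frame already protected, no cipher set by the driver */
	printf("dvlan: %d\n", is_dvlan_frame(false, 0, FCTL_PROTECTED));
	return 0;
}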
- */ - if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links) - link_id = ffs(vif->valid_links) - 1; - else - link_id = arvif->link_id; - - link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); - if (!link_conf) { - ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n", - vif->addr, arvif->link_id); - return -ENOLINK; - } - - memcpy(arvif->bssid, link_conf->addr, ETH_ALEN); - - arvif->ar = ar; - vdev_id = __ffs64(ab->free_vdev_map); - arvif->vdev_id = vdev_id; ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { @@ -8386,7 +8387,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) break; case NL80211_IFTYPE_MONITOR: ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR; - ar->monitor_vdev_id = vdev_id; break; case NL80211_IFTYPE_P2P_DEVICE: ahvif->vdev_type = WMI_VDEV_TYPE_STA; @@ -8396,6 +8396,53 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) WARN_ON(1); break; } +} + +int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) +{ + struct ath12k_hw *ah = ar->ah; + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ah->hw; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; + struct ath12k_wmi_peer_create_arg peer_param = {0}; + struct ieee80211_bss_conf *link_conf = NULL; + u32 param_id, param_value; + u16 nss; + int i; + int ret, vdev_id; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + /* In NO_VIRTUAL_MONITOR, its necessary to restrict only one monitor + * interface in each radio + */ + if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created) + return -EINVAL; + + link_id = arvif->link_id; + + if (link_id < IEEE80211_MLD_MAX_NUM_LINKS) { + link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n", + vif->addr, arvif->link_id); + return -ENOLINK; + } + } + + if (link_conf) + memcpy(arvif->bssid, link_conf->addr, ETH_ALEN); + else + memcpy(arvif->bssid, vif->addr, ETH_ALEN); + + arvif->ar = ar; + vdev_id = __ffs64(ab->free_vdev_map); + arvif->vdev_id = vdev_id; + if (vif->type == NL80211_IFTYPE_MONITOR) + ar->monitor_vdev_id = vdev_id; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n", arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype, @@ -8513,7 +8560,11 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) break; } - arvif->txpower = link_conf->txpower; + if (link_conf) + arvif->txpower = link_conf->txpower; + else + arvif->txpower = NL80211_TX_POWER_AUTOMATIC; + ret = ath12k_mac_txpower_recalc(ar); if (ret) goto err_peer_del; @@ -8751,7 +8802,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_reg_info *reg_info; struct ath12k_link_vif *arvif; + struct ath12k_base *ab; + struct ath12k *ar; int i; lockdep_assert_wiphy(hw->wiphy); @@ -8770,6 +8824,22 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + + ath12k_mac_determine_vdev_type(vif, ahvif); + + for_each_ar(ah, ar, i) { + if (!ath12k_wmi_supports_6ghz_cc_ext(ar)) + continue; + + ab = ar->ab; + reg_info = ab->reg_info[ar->pdev_idx]; + 
ath12k_dbg(ab, ATH12K_DBG_MAC, "interface added to change reg rules\n"); + ah->regd_updated = false; + ath12k_reg_handle_chan_list(ab, reg_info, ahvif->vdev_type, + IEEE80211_REG_UNSET_AP); + break; + } + /* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver * will not know if this interface is an ML vif at this point. */ @@ -9326,6 +9396,15 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif, return ret; } + /* TODO: For now we only set TPC power here. However when + * channel changes, say CSA, it should be updated again. + */ + if (ath12k_mac_supports_station_tpc(ar, ahvif, chandef)) { + ath12k_mac_fill_reg_tpc_info(ar, arvif, ctx); + ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, + &arvif->reg_tpc_info); + } + ar->num_started_vdevs++; ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", ahvif->vif->addr, arvif->vdev_id); @@ -9581,7 +9660,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, params.aid = ahvif->aid; params.bssid = arvif->bssid; - tx_arvif = ath12k_mac_get_tx_arvif(arvif); + tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf); if (tx_arvif) { params.tx_bssid = tx_arvif->bssid; params.nontx_profile_idx = link_conf->bssid_index; @@ -9718,6 +9797,391 @@ static int ath12k_start_vdev_delay(struct ath12k *ar, return 0; } +static u8 ath12k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def) +{ + if (chan_def->chan->flags & IEEE80211_CHAN_PSD) { + switch (chan_def->width) { + case NL80211_CHAN_WIDTH_20: + return 1; + case NL80211_CHAN_WIDTH_40: + return 2; + case NL80211_CHAN_WIDTH_80: + return 4; + case NL80211_CHAN_WIDTH_160: + return 8; + case NL80211_CHAN_WIDTH_320: + return 16; + default: + return 1; + } + } else { + switch (chan_def->width) { + case NL80211_CHAN_WIDTH_20: + return 1; + case NL80211_CHAN_WIDTH_40: + return 2; + case NL80211_CHAN_WIDTH_80: + return 3; + case NL80211_CHAN_WIDTH_160: + return 4; + case NL80211_CHAN_WIDTH_320: + return 5; + default: + return 1; + } + } +} + +static u16 ath12k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def) +{ + u16 diff_seq; + + /* It is to get the lowest channel number's center frequency of the chan. + * For example, + * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1 + * with center frequency 5955, its diff is 5965 - 5955 = 10. + * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1 + * with center frequency 5955, its diff is 5985 - 5955 = 30. + * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1 + * with center frequency 5955, its diff is 6025 - 5955 = 70. + * bandwidth=320 MHz, center frequency is 6105, lowest channel is 1 + * with center frequency 5955, its diff is 6105 - 5955 = 70. + */ + switch (chan_def->width) { + case NL80211_CHAN_WIDTH_320: + diff_seq = 150; + break; + case NL80211_CHAN_WIDTH_160: + diff_seq = 70; + break; + case NL80211_CHAN_WIDTH_80: + diff_seq = 30; + break; + case NL80211_CHAN_WIDTH_40: + diff_seq = 10; + break; + default: + diff_seq = 0; + } + + return chan_def->center_freq1 - diff_seq; +} + +static u16 ath12k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def, + u16 start_seq, u8 seq) +{ + u16 seg_seq; + + /* It is to get the center frequency of the specific bandwidth. + * start_seq means the lowest channel number's center frequency. + * seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz. + * For example, + * lowest channel is 1, its center frequency 5955, + * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0. 
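ath12k_mac_supports_station_tpc() gates the new transmit-power-control path: only 6 GHz STA vdevs (subtype NONE) on firmware advertising the extended regulatory and TPC services take it, and for those the vdev start/restart path fills arvif->reg_tpc_info and pushes it to firmware via the vdev set-TPC-power WMI command. A hedged outline of the call order; the stub functions below only stand in for the driver calls and print what each step does.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver calls; only the ordering is the point here */
static bool supports_station_tpc(void)        { return true; /* 6 GHz STA + FW services */ }
static void fill_reg_tpc_info(void)           { puts("fill per-level TX power table"); }
static void wmi_send_vdev_set_tpc_power(void) { puts("send vdev set-TPC-power WMI command"); }

static void vdev_start_sketch(void)
{
	/* ...vdev start/restart succeeded... */
	if (supports_station_tpc()) {
		/* as the TODO notes, this should be refreshed again on channel switch */
		fill_reg_tpc_info();
		wmi_send_vdev_set_tpc_power();
	}
}

int main(void) { vdev_start_sketch(); return 0; }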
+ * lowest channel is 1, its center frequency 5955, + * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10. + * lowest channel is 1, its center frequency 5955, + * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30. + * lowest channel is 1, its center frequency 5955, + * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70. + */ + seg_seq = 10 * (BIT(seq) - 1); + return seg_seq + start_seq; +} + +static void ath12k_mac_get_psd_channel(struct ath12k *ar, + u16 step_freq, + u16 *start_freq, + u16 *center_freq, + u8 i, + struct ieee80211_channel **temp_chan, + s8 *tx_power) +{ + /* It is to get the center frequency for each 20 MHz. + * For example, if the chan is 160 MHz and center frequency is 6025, + * then it include 8 channels, they are 1/5/9/13/17/21/25/29, + * channel number 1's center frequency is 5955, it is parameter start_freq. + * parameter i is the step of the 8 channels. i is 0~7 for the 8 channels. + * the channel 1/5/9/13/17/21/25/29 maps i=0/1/2/3/4/5/6/7, + * and maps its center frequency is 5955/5975/5995/6015/6035/6055/6075/6095, + * the gap is 20 for each channel, parameter step_freq means the gap. + * after get the center frequency of each channel, it is easy to find the + * struct ieee80211_channel of it and get the max_reg_power. + */ + *center_freq = *start_freq + i * step_freq; + *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq); + *tx_power = (*temp_chan)->max_reg_power; +} + +static void ath12k_mac_get_eirp_power(struct ath12k *ar, + u16 *start_freq, + u16 *center_freq, + u8 i, + struct ieee80211_channel **temp_chan, + struct cfg80211_chan_def *def, + s8 *tx_power) +{ + /* It is to get the center frequency for 20 MHz/40 MHz/80 MHz/ + * 160 MHz bandwidth, and then plus 10 to the center frequency, + * it is the center frequency of a channel number. + * For example, when configured channel number is 1. + * center frequency is 5965 when bandwidth=40 MHz, after plus 10, it is 5975, + * then it is channel number 5. + * center frequency is 5985 when bandwidth=80 MHz, after plus 10, it is 5995, + * then it is channel number 9. + * center frequency is 6025 when bandwidth=160 MHz, after plus 10, it is 6035, + * then it is channel number 17. + * after get the center frequency of each channel, it is easy to find the + * struct ieee80211_channel of it and get the max_reg_power. 
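The helpers above reduce to simple arithmetic on the channel definition: the power-level count is the number of 20 MHz subchannels for PSD rules (1/2/4/8/16 for 20-320 MHz) or one per nested bandwidth otherwise (1/2/3/4/5); the lowest 20 MHz subchannel center is center_freq1 minus 0/10/30/70/150 MHz; PSD level centers then step by 20 MHz, while the EIRP segment centers follow 10 * (2^i - 1) MHz offsets. A small worked example of that arithmetic, independent of the driver structures, using the channel-1 figures from the comments above:

#include <stdio.h>

/* Offset from center_freq1 down to the lowest 20 MHz subchannel center, plus
 * PSD and EIRP power-level counts, per bandwidth.
 */
struct bw_info { int bw_mhz; int diff_seq; int psd_levels; int eirp_levels; };

static const struct bw_info tbl[] = {
	{  20,   0,  1, 1 },
	{  40,  10,  2, 2 },
	{  80,  30,  4, 3 },
	{ 160,  70,  8, 4 },
	{ 320, 150, 16, 5 },
};

int main(void)
{
	/* 6 GHz channel 1: its 20 MHz center is 5955 MHz; center_freq1 grows
	 * with the configured bandwidth (5955/5965/5985/6025/6105 MHz).
	 */
	int center_freq1[] = { 5955, 5965, 5985, 6025, 6105 };

	for (int i = 0; i < 5; i++) {
		int start = center_freq1[i] - tbl[i].diff_seq;

		printf("%3d MHz: start %d MHz, %d PSD levels, %d EIRP levels\n",
		       tbl[i].bw_mhz, start, tbl[i].psd_levels, tbl[i].eirp_levels);
		/* PSD case: one 20 MHz step per level */
		for (int k = 0; k < tbl[i].psd_levels; k++)
			printf("    PSD level %2d center %d MHz\n", k, start + k * 20);
		/* EIRP case: level k covers 20 << k MHz, segment center at
		 * start + 10 * (2^k - 1) MHz
		 */
		for (int k = 0; k < tbl[i].eirp_levels; k++)
			printf("    EIRP level %d segment center %d MHz\n",
			       k, start + 10 * ((1 << k) - 1));
	}
	return 0;
}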
+ */ + *center_freq = ath12k_mac_get_seg_freq(def, *start_freq, i); + + /* For the 20 MHz, its center frequency is same with same channel */ + if (i != 0) + *center_freq += 10; + + *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq); + *tx_power = (*temp_chan)->max_reg_power; +} + +void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info; + struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif); + struct ieee80211_channel *chan, *temp_chan; + u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction; + bool is_psd_power = false, is_tpe_present = false; + s8 max_tx_power[ATH12K_NUM_PWR_LEVELS], + psd_power, tx_power, eirp_power; + u16 start_freq, center_freq; + + chan = ctx->def.chan; + start_freq = ath12k_mac_get_6ghz_start_frequency(&ctx->def); + pwr_reduction = bss_conf->pwr_reduction; + + if (arvif->reg_tpc_info.num_pwr_levels) { + is_tpe_present = true; + num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels; + } else { + num_pwr_levels = ath12k_mac_get_num_pwr_levels(&ctx->def); + } + + for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) { + /* STA received TPE IE*/ + if (is_tpe_present) { + /* local power is PSD power*/ + if (chan->flags & IEEE80211_CHAN_PSD) { + /* Connecting AP is psd power */ + if (reg_tpc_info->is_psd_power) { + is_psd_power = true; + ath12k_mac_get_psd_channel(ar, 20, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &tx_power); + psd_power = temp_chan->psd; + eirp_power = tx_power; + max_tx_power[pwr_lvl_idx] = + min_t(s8, + psd_power, + reg_tpc_info->tpe[pwr_lvl_idx]); + /* Connecting AP is not psd power */ + } else { + ath12k_mac_get_eirp_power(ar, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &ctx->def, + &tx_power); + psd_power = temp_chan->psd; + /* convert psd power to EIRP power based + * on channel width + */ + tx_power = + min_t(s8, tx_power, + psd_power + 13 + pwr_lvl_idx * 3); + max_tx_power[pwr_lvl_idx] = + min_t(s8, + tx_power, + reg_tpc_info->tpe[pwr_lvl_idx]); + } + /* local power is not PSD power */ + } else { + /* Connecting AP is psd power */ + if (reg_tpc_info->is_psd_power) { + is_psd_power = true; + ath12k_mac_get_psd_channel(ar, 20, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &tx_power); + eirp_power = tx_power; + max_tx_power[pwr_lvl_idx] = + reg_tpc_info->tpe[pwr_lvl_idx]; + /* Connecting AP is not psd power */ + } else { + ath12k_mac_get_eirp_power(ar, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &ctx->def, + &tx_power); + max_tx_power[pwr_lvl_idx] = + min_t(s8, + tx_power, + reg_tpc_info->tpe[pwr_lvl_idx]); + } + } + /* STA not received TPE IE */ + } else { + /* local power is PSD power*/ + if (chan->flags & IEEE80211_CHAN_PSD) { + is_psd_power = true; + ath12k_mac_get_psd_channel(ar, 20, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &tx_power); + psd_power = temp_chan->psd; + eirp_power = tx_power; + max_tx_power[pwr_lvl_idx] = psd_power; + } else { + ath12k_mac_get_eirp_power(ar, + &start_freq, + ¢er_freq, + pwr_lvl_idx, + &temp_chan, + &ctx->def, + &tx_power); + max_tx_power[pwr_lvl_idx] = tx_power; + } + } + + if (is_psd_power) { + /* If AP local power constraint is present */ + if (pwr_reduction) + eirp_power = eirp_power - pwr_reduction; + + /* If firmware updated max tx power is non zero, then take + * the min of firmware updated ap tx power + * and max power 
derived from above mentioned parameters. + */ + ath12k_dbg(ab, ATH12K_DBG_MAC, + "eirp power : %d firmware report power : %d\n", + eirp_power, ar->max_allowed_tx_power); + /* Firmware reports lower max_allowed_tx_power during vdev + * start response. In case of 6 GHz, firmware is not aware + * of EIRP power unless driver sets EIRP power through WMI + * TPC command. So radio which does not support idle power + * save can set maximum calculated EIRP power directly to + * firmware through TPC command without min comparison with + * vdev start response's max_allowed_tx_power. + */ + if (ar->max_allowed_tx_power && ab->hw_params->idle_ps) + eirp_power = min_t(s8, + eirp_power, + ar->max_allowed_tx_power); + } else { + /* If AP local power constraint is present */ + if (pwr_reduction) + max_tx_power[pwr_lvl_idx] = + max_tx_power[pwr_lvl_idx] - pwr_reduction; + /* If firmware updated max tx power is non zero, then take + * the min of firmware updated ap tx power + * and max power derived from above mentioned parameters. + */ + if (ar->max_allowed_tx_power && ab->hw_params->idle_ps) + max_tx_power[pwr_lvl_idx] = + min_t(s8, + max_tx_power[pwr_lvl_idx], + ar->max_allowed_tx_power); + } + reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq; + reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power = + max_tx_power[pwr_lvl_idx]; + } + + reg_tpc_info->num_pwr_levels = num_pwr_levels; + reg_tpc_info->is_psd_power = is_psd_power; + reg_tpc_info->eirp_power = eirp_power; + reg_tpc_info->ap_power_type = + ath12k_reg_ap_pwr_convert(bss_conf->power_type); +} + +static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar, + struct ath12k_link_vif *arvif) +{ + struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif); + struct ath12k_reg_tpc_power_info *tpc_info = &arvif->reg_tpc_info; + struct ieee80211_parsed_tpe_eirp *local_non_psd, *reg_non_psd; + struct ieee80211_parsed_tpe_psd *local_psd, *reg_psd; + struct ieee80211_parsed_tpe *tpe = &bss_conf->tpe; + enum wmi_reg_6g_client_type client_type; + struct ath12k_reg_info *reg_info; + struct ath12k_base *ab = ar->ab; + bool psd_valid, non_psd_valid; + int i; + + reg_info = ab->reg_info[ar->pdev_idx]; + client_type = reg_info->client_type; + + local_psd = &tpe->psd_local[client_type]; + reg_psd = &tpe->psd_reg_client[client_type]; + local_non_psd = &tpe->max_local[client_type]; + reg_non_psd = &tpe->max_reg_client[client_type]; + + psd_valid = local_psd->valid | reg_psd->valid; + non_psd_valid = local_non_psd->valid | reg_non_psd->valid; + + if (!psd_valid && !non_psd_valid) { + ath12k_warn(ab, + "no transmit power envelope match client power type %d\n", + client_type); + return; + }; + + if (psd_valid) { + tpc_info->is_psd_power = true; + + tpc_info->num_pwr_levels = max(local_psd->count, + reg_psd->count); + if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS) + tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS; + + for (i = 0; i < tpc_info->num_pwr_levels; i++) { + tpc_info->tpe[i] = min(local_psd->power[i], + reg_psd->power[i]) / 2; + ath12k_dbg(ab, ATH12K_DBG_MAC, + "TPE PSD power[%d] : %d\n", + i, tpc_info->tpe[i]); + } + } else { + tpc_info->is_psd_power = false; + tpc_info->eirp_power = 0; + + tpc_info->num_pwr_levels = max(local_non_psd->count, + reg_non_psd->count); + if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS) + tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS; + + for (i = 0; i < tpc_info->num_pwr_levels; i++) { + tpc_info->tpe[i] = min(local_non_psd->power[i], + reg_non_psd->power[i]) / 2; + ath12k_dbg(ab, 
ATH12K_DBG_MAC, + "non PSD power[%d] : %d\n", + i, tpc_info->tpe[i]); + } + } +} + static int ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -9756,6 +10220,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, "mac chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); + if (ath12k_wmi_supports_6ghz_cc_ext(ar) && + ctx->def.chan->band == NL80211_BAND_6GHZ && + ahvif->vdev_type == WMI_VDEV_TYPE_STA) + ath12k_mac_parse_tx_pwr_env(ar, arvif); + arvif->punct_bitmap = ctx->def.punctured; /* for some targets bss peer must be created before vdev_start */ @@ -9860,6 +10329,11 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, reinit_completion(&ar->completed_11d_scan); ar->state_11d = ATH12K_11D_PREPARING; } + + if (ar->scan.arvif == arvif && ar->scan.state == ATH12K_SCAN_RUNNING) { + ath12k_scan_abort(ar); + ar->scan.arvif = NULL; + } } static int @@ -10889,8 +11363,20 @@ void ath12k_mac_update_freq_range(struct ath12k *ar, if (!(freq_low && freq_high)) return; - ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low); - ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high); + if (ar->freq_range.start_freq || ar->freq_range.end_freq) { + ar->freq_range.start_freq = min(ar->freq_range.start_freq, + MHZ_TO_KHZ(freq_low)); + ar->freq_range.end_freq = max(ar->freq_range.end_freq, + MHZ_TO_KHZ(freq_high)); + } else { + ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low); + ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "mac pdev %u freq limit updated. New range %u->%u MHz\n", + ar->pdev->pdev_id, KHZ_TO_MHZ(ar->freq_range.start_freq), + KHZ_TO_MHZ(ar->freq_range.end_freq)); } static void ath12k_mac_update_ch_list(struct ath12k *ar, @@ -11374,6 +11860,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) for_each_ar(ah, ar, i) { cancel_work_sync(&ar->regd_update_work); ath12k_debugfs_unregister(ar); + ath12k_fw_stats_reset(ar); } ieee80211_unregister_hw(hw); @@ -11651,6 +12138,18 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) goto err_unregister_hw; } + if (ar->ab->hw_params->current_cc_support && ab->new_alpha2[0]) { + struct wmi_set_current_country_arg current_cc = {}; + + memcpy(¤t_cc.alpha2, ab->new_alpha2, 2); + memcpy(&ar->alpha2, ab->new_alpha2, 2); + ret = ath12k_wmi_send_set_current_country_cmd(ar, ¤t_cc); + if (ret) + ath12k_warn(ar->ab, + "failed set cc code for mac register: %d\n", + ret); + } + ath12k_fw_stats_init(ar); ath12k_debugfs_register(ar); } @@ -12025,6 +12524,7 @@ int ath12k_mac_allocate(struct ath12k_hw_group *ag) if (!ab) continue; + ath12k_debugfs_pdev_create(ab); ath12k_mac_set_device_defaults(ab); total_radio += ab->num_radios; } diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index da37332352fe..e6e74b45bfa4 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -67,6 +67,46 @@ struct ath12k_mac_get_any_chanctx_conf_arg { struct ieee80211_chanctx_conf *chanctx_conf; }; +/** + * struct ath12k_chan_power_info - TPE containing power info per channel chunk + * @chan_cfreq: channel center freq (MHz) + * e.g. 
+ * channel 37/20 MHz, it is 6135 + * channel 37/40 MHz, it is 6125 + * channel 37/80 MHz, it is 6145 + * channel 37/160 MHz, it is 6185 + * @tx_power: transmit power (dBm) + */ +struct ath12k_chan_power_info { + u16 chan_cfreq; + s8 tx_power; +}; + +/* ath12k only deals with 320 MHz, so 16 subchannels */ +#define ATH12K_NUM_PWR_LEVELS 16 + +/** + * struct ath12k_reg_tpc_power_info - regulatory TPC power info + * @is_psd_power: is PSD power or not + * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD + * @ap_power_type: type of power (SP/LPI/VLP) + * @num_pwr_levels: number of power levels + * @reg_max: Array of maximum TX power (dBm) per PSD value + * @ap_constraint_power: AP constraint power (dBm) + * @tpe: TPE values processed from TPE IE + * @chan_power_info: power info to send to firmware + */ +struct ath12k_reg_tpc_power_info { + bool is_psd_power; + u8 eirp_power; + enum wmi_reg_6g_ap_type ap_power_type; + u8 num_pwr_levels; + u8 reg_max[ATH12K_NUM_PWR_LEVELS]; + u8 ap_constraint_power; + s8 tpe[ATH12K_NUM_PWR_LEVELS]; + struct ath12k_chan_power_info chan_power_info[ATH12K_NUM_PWR_LEVELS]; +}; + extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default; #define ATH12K_SCAN_11D_INTERVAL 600000 @@ -128,4 +168,7 @@ struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, int ath12k_mac_get_fw_stats(struct ath12k *ar, struct ath12k_fw_stats_req_params *param); void ath12k_mac_update_freq_range(struct ath12k *ar, u32 freq_low, u32 freq_high); +void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar, + struct ath12k_link_vif *arvif, + struct ieee80211_chanctx_conf *ctx); #endif diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 528a4a57d136..489d546390fc 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -600,7 +600,8 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) ab->hw_params->ring_mask->rx_wbm_rel[i] || ab->hw_params->ring_mask->reo_status[i] || ab->hw_params->ring_mask->host2rxdma[i] || - ab->hw_params->ring_mask->rx_mon_dest[i]) { + ab->hw_params->ring_mask->rx_mon_dest[i] || + ab->hw_params->ring_mask->rx_mon_status[i]) { num_irq = 1; } @@ -1733,8 +1734,6 @@ static void ath12k_pci_remove(struct pci_dev *pdev) if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath12k_pci_power_down(ab, false); - ath12k_qmi_deinit_service(ab); - ath12k_core_hw_group_unassign(ab); goto qmi_fail; } @@ -1742,9 +1741,10 @@ static void ath12k_pci_remove(struct pci_dev *pdev) cancel_work_sync(&ab->reset_work); cancel_work_sync(&ab->dump_work); - ath12k_core_deinit(ab); + ath12k_core_hw_group_cleanup(ab->ag); qmi_fail: + ath12k_core_deinit(ab); ath12k_fw_unmap(ab); ath12k_mhi_unregister(ab_pci); diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c index 7048834e0d14..2598b39d5d7e 100644 --- a/drivers/net/wireless/ath/ath12k/reg.c +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -139,7 +139,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait) int num_channels = 0; int i, ret, left; - if (wait && ar->state_11d != ATH12K_11D_IDLE) { + if (wait && ar->state_11d == ATH12K_11D_RUNNING) { left = wait_for_completion_timeout(&ar->completed_11d_scan, ATH12K_SCAN_TIMEOUT_HZ); if (!left) { @@ -265,8 +265,8 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig, int ath12k_regd_update(struct ath12k *ar, bool init) { - u32 phy_id, freq_low = 0, freq_high = 0, supported_bands, band; struct 
ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; + u32 phy_id, freq_low, freq_high, supported_bands; struct ath12k_hw *ah = ath12k_ar_to_ah(ar); struct ieee80211_hw *hw = ah->hw; struct ieee80211_regdomain *regd, *regd_copy = NULL; @@ -276,45 +276,45 @@ int ath12k_regd_update(struct ath12k *ar, bool init) ab = ar->ab; supported_bands = ar->pdev->cap.supported_bands; - if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { - band = NL80211_BAND_2GHZ; - } else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) { - band = NL80211_BAND_5GHZ; - } else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) { - band = NL80211_BAND_6GHZ; - } else { - /* This condition is not expected. - */ - WARN_ON(1); - ret = -EINVAL; - goto err; - } - reg_cap = &ab->hal_reg_cap[ar->pdev_idx]; - if (ab->hw_params->single_pdev_only && !ar->supports_6ghz) { - phy_id = ar->pdev->cap.band[band].phy_id; - reg_cap = &ab->hal_reg_cap[phy_id]; - } - /* Possible that due to reg change, current limits for supported - * frequency changed. Update that + * frequency changed. Update it. As a first step, reset the + * previous values and then compute and set the new values. */ + ar->freq_range.start_freq = 0; + ar->freq_range.end_freq = 0; + if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { + if (ab->hw_params->single_pdev_only) { + phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_2GHZ_CAP].phy_id; + reg_cap = &ab->hal_reg_cap[phy_id]; + } + freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2ghz.start_freq); freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2ghz.end_freq); - } else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) { + + ath12k_mac_update_freq_range(ar, freq_low, freq_high); + } + + if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) { + if (ab->hw_params->single_pdev_only) { + phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_5GHZ_CAP].phy_id; + reg_cap = &ab->hal_reg_cap[phy_id]; + } + freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq); freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq); - } else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) { - freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq); - freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq); + + ath12k_mac_update_freq_range(ar, freq_low, freq_high); } - ath12k_mac_update_freq_range(ar, freq_low, freq_high); + if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) { + freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq); + freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq); - ath12k_dbg(ab, ATH12K_DBG_REG, "pdev %u reg updated freq limits %u->%u MHz\n", - ar->pdev->pdev_id, freq_low, freq_high); + ath12k_mac_update_freq_range(ar, freq_low, freq_high); + } /* If one of the radios within ah has already updated the regd for * the wiphy, then avoid setting regd again @@ -454,129 +454,6 @@ static u32 ath12k_map_fw_phy_flags(u32 phy_flags) return flags; } -static bool -ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1, - struct ieee80211_reg_rule *rule2) -{ - u32 start_freq1, end_freq1; - u32 start_freq2, end_freq2; - - start_freq1 = rule1->freq_range.start_freq_khz; - start_freq2 = rule2->freq_range.start_freq_khz; - - end_freq1 = rule1->freq_range.end_freq_khz; - end_freq2 = rule2->freq_range.end_freq_khz; - - if ((start_freq1 >= start_freq2 && - start_freq1 < end_freq2) || - (start_freq2 > start_freq1 && - start_freq2 < end_freq1)) - return true; - - /* TODO: Should 
we restrict intersection feasibility - * based on min bandwidth of the intersected region also, - * say the intersected rule should have a min bandwidth - * of 20MHz? - */ - - return false; -} - -static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1, - struct ieee80211_reg_rule *rule2, - struct ieee80211_reg_rule *new_rule) -{ - u32 start_freq1, end_freq1; - u32 start_freq2, end_freq2; - u32 freq_diff, max_bw; - - start_freq1 = rule1->freq_range.start_freq_khz; - start_freq2 = rule2->freq_range.start_freq_khz; - - end_freq1 = rule1->freq_range.end_freq_khz; - end_freq2 = rule2->freq_range.end_freq_khz; - - new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1, - start_freq2); - new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2); - - freq_diff = new_rule->freq_range.end_freq_khz - - new_rule->freq_range.start_freq_khz; - max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz, - rule2->freq_range.max_bandwidth_khz); - new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff); - - new_rule->power_rule.max_antenna_gain = - min_t(u32, rule1->power_rule.max_antenna_gain, - rule2->power_rule.max_antenna_gain); - - new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp, - rule2->power_rule.max_eirp); - - /* Use the flags of both the rules */ - new_rule->flags = rule1->flags | rule2->flags; - - /* To be safe, lts use the max cac timeout of both rules */ - new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms, - rule2->dfs_cac_ms); -} - -static struct ieee80211_regdomain * -ath12k_regd_intersect(struct ieee80211_regdomain *default_regd, - struct ieee80211_regdomain *curr_regd) -{ - u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules; - struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule; - struct ieee80211_regdomain *new_regd = NULL; - u8 i, j, k; - - num_old_regd_rules = default_regd->n_reg_rules; - num_curr_regd_rules = curr_regd->n_reg_rules; - num_new_regd_rules = 0; - - /* Find the number of intersecting rules to allocate new regd memory */ - for (i = 0; i < num_old_regd_rules; i++) { - old_rule = default_regd->reg_rules + i; - for (j = 0; j < num_curr_regd_rules; j++) { - curr_rule = curr_regd->reg_rules + j; - - if (ath12k_reg_can_intersect(old_rule, curr_rule)) - num_new_regd_rules++; - } - } - - if (!num_new_regd_rules) - return NULL; - - new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules * - sizeof(struct ieee80211_reg_rule)), - GFP_ATOMIC); - - if (!new_regd) - return NULL; - - /* We set the new country and dfs region directly and only trim - * the freq, power, antenna gain by intersecting with the - * default regdomain. Also MAX of the dfs cac timeout is selected. 
- */ - new_regd->n_reg_rules = num_new_regd_rules; - memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2)); - new_regd->dfs_region = curr_regd->dfs_region; - new_rule = new_regd->reg_rules; - - for (i = 0, k = 0; i < num_old_regd_rules; i++) { - old_rule = default_regd->reg_rules + i; - for (j = 0; j < num_curr_regd_rules; j++) { - curr_rule = curr_regd->reg_rules + j; - - if (ath12k_reg_can_intersect(old_rule, curr_rule)) - ath12k_reg_intersect_rules(old_rule, curr_rule, - (new_rule + k++)); - } - } - return new_regd; -} - static const char * ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region) { @@ -613,13 +490,14 @@ ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) static void ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq, u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr, - u32 reg_flags) + s8 psd, u32 reg_flags) { reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq); reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq); reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw); reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain); reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr); + reg_rule->psd = psd; reg_rule->flags = reg_flags; } @@ -641,7 +519,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab, ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain, reg_rule->reg_power, - flags); + reg_rule->psd_eirp, flags); ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", @@ -663,7 +541,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab, ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, reg_rule->ant_gain, reg_rule->reg_power, - flags); + reg_rule->psd_eirp, flags); regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; @@ -688,7 +566,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab, ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, bw, reg_rule->ant_gain, reg_rule->reg_power, - flags); + reg_rule->psd_eirp, flags); ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", @@ -710,26 +588,67 @@ static void ath12k_reg_update_freq_range(struct ath12k_reg_freq *reg_freq, reg_freq->end_freq = reg_rule->end_freq; } +enum wmi_reg_6g_ap_type +ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type) +{ + switch (power_type) { + case IEEE80211_REG_LPI_AP: + return WMI_REG_INDOOR_AP; + case IEEE80211_REG_SP_AP: + return WMI_REG_STD_POWER_AP; + case IEEE80211_REG_VLP_AP: + return WMI_REG_VLP_AP; + default: + return WMI_REG_MAX_AP_TYPE; + } +} + struct ieee80211_regdomain * ath12k_reg_build_regd(struct ath12k_base *ab, - struct ath12k_reg_info *reg_info, bool intersect) + struct ath12k_reg_info *reg_info, + enum wmi_vdev_type vdev_type, + enum ieee80211_ap_reg_power power_type) { - struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL; - struct ath12k_reg_rule *reg_rule; + struct ieee80211_regdomain *new_regd = NULL; + struct ath12k_reg_rule *reg_rule, *reg_rule_6ghz; + u32 flags, reg_6ghz_number, max_bw_6ghz; u8 i = 0, j = 0, k = 0; u8 num_rules; u16 max_bw; - u32 flags; char alpha2[3]; num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules; - /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list. 
- * This can be updated to choose the combination dynamically based on AP - * type and client type, after complete 6G regulatory support is added. - */ - if (reg_info->is_ext_reg_event) - num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP]; + if (reg_info->is_ext_reg_event) { + if (vdev_type == WMI_VDEV_TYPE_STA) { + enum wmi_reg_6g_ap_type ap_type; + + ap_type = ath12k_reg_ap_pwr_convert(power_type); + if (ap_type == WMI_REG_MAX_AP_TYPE) + ap_type = WMI_REG_INDOOR_AP; + + reg_6ghz_number = reg_info->num_6g_reg_rules_cl + [ap_type][WMI_REG_DEFAULT_CLIENT]; + if (reg_6ghz_number == 0) { + ap_type = WMI_REG_INDOOR_AP; + reg_6ghz_number = reg_info->num_6g_reg_rules_cl + [ap_type][WMI_REG_DEFAULT_CLIENT]; + } + + reg_rule_6ghz = reg_info->reg_rules_6g_client_ptr + [ap_type][WMI_REG_DEFAULT_CLIENT]; + max_bw_6ghz = reg_info->max_bw_6g_client + [ap_type][WMI_REG_DEFAULT_CLIENT]; + } else { + reg_6ghz_number = reg_info->num_6g_reg_rules_ap + [WMI_REG_INDOOR_AP]; + reg_rule_6ghz = + reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP]; + max_bw_6ghz = reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]; + } + + num_rules += reg_6ghz_number; + } if (!num_rules) goto ret; @@ -738,20 +657,20 @@ ath12k_reg_build_regd(struct ath12k_base *ab, if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI) num_rules += 2; - tmp_regd = kzalloc(sizeof(*tmp_regd) + + new_regd = kzalloc(sizeof(*new_regd) + (num_rules * sizeof(struct ieee80211_reg_rule)), GFP_ATOMIC); - if (!tmp_regd) + if (!new_regd) goto ret; - memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); + memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); alpha2[2] = '\0'; - tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region); + new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region); ath12k_dbg(ab, ATH12K_DBG_REG, "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n", - alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region), + alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region), reg_info->dfs_region, num_rules); /* Reset start and end frequency for each band @@ -788,13 +707,13 @@ ath12k_reg_build_regd(struct ath12k_base *ab, */ flags = NL80211_RRF_AUTO_BW; ath12k_reg_update_freq_range(&ab->reg_freq_5ghz, reg_rule); - } else if (reg_info->is_ext_reg_event && - reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] && - (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) { - reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++; - max_bw = min_t(u16, reg_rule->max_bw, - reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]); + } else if (reg_info->is_ext_reg_event && reg_6ghz_number && + (k < reg_6ghz_number)) { + reg_rule = reg_rule_6ghz + k++; + max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz); flags = NL80211_RRF_AUTO_BW; + if (reg_rule->psd_flag) + flags |= NL80211_RRF_PSD; ath12k_reg_update_freq_range(&ab->reg_freq_6ghz, reg_rule); } else { break; @@ -803,11 +722,11 @@ ath12k_reg_build_regd(struct ath12k_base *ab, flags |= ath12k_map_fw_reg_flags(reg_rule->flags); flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap); - ath12k_reg_update_rule(tmp_regd->reg_rules + i, + ath12k_reg_update_rule(new_regd->reg_rules + i, reg_rule->start_freq, reg_rule->end_freq, max_bw, reg_rule->ant_gain, reg_rule->reg_power, - flags); + reg_rule->psd_eirp, flags); /* Update dfs cac timeout if the dfs domain is ETSI and the * new rule covers weather radar band. 
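With the intersection path removed, ath12k_reg_build_regd() now picks the 6 GHz rule list directly from the firmware event: for STA vdevs the requested AP power type is mapped with ath12k_reg_ap_pwr_convert() and the default-client list for that type is used, falling back to the indoor-AP (LPI) list when the type is unknown or its list is empty, while AP vdevs keep the indoor-AP list. A condensed sketch of that selection, with simplified enums and a plain array standing in for the ath12k_reg_info rule counters:

#include <stdio.h>

enum ap_type { INDOOR_AP, STD_POWER_AP, VLP_AP, MAX_AP_TYPE };      /* WMI_REG_* stand-ins */
enum reg_power { REG_LPI_AP, REG_SP_AP, REG_VLP_AP, REG_UNSET_AP }; /* IEEE80211_REG_* stand-ins */

static enum ap_type ap_pwr_convert(enum reg_power p)
{
	switch (p) {
	case REG_LPI_AP: return INDOOR_AP;
	case REG_SP_AP:  return STD_POWER_AP;
	case REG_VLP_AP: return VLP_AP;
	default:         return MAX_AP_TYPE;
	}
}

/* num_rules[type] stands in for num_6g_reg_rules_cl[type][WMI_REG_DEFAULT_CLIENT] */
static enum ap_type pick_6ghz_rules_for_sta(const int num_rules[MAX_AP_TYPE],
					    enum reg_power power_type)
{
	enum ap_type t = ap_pwr_convert(power_type);

	if (t == MAX_AP_TYPE)   /* power type not advertised or not known yet */
		t = INDOOR_AP;
	if (num_rules[t] == 0)  /* no rules for that type: fall back to LPI */
		t = INDOOR_AP;
	return t;
}

int main(void)
{
	int num_rules[MAX_AP_TYPE] = { 5, 0, 3 };  /* example counts per AP type */

	printf("SP request  -> list %d\n", pick_6ghz_rules_for_sta(num_rules, REG_SP_AP));
	printf("VLP request -> list %d\n", pick_6ghz_rules_for_sta(num_rules, REG_VLP_AP));
	return 0;
}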
@@ -818,7 +737,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab, reg_info->dfs_region == ATH12K_DFS_REG_ETSI && (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW && reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){ - ath12k_reg_update_weather_radar_band(ab, tmp_regd, + ath12k_reg_update_weather_radar_band(ab, new_regd, reg_rule, &i, flags, max_bw); continue; @@ -828,36 +747,19 @@ ath12k_reg_build_regd(struct ath12k_base *ab, ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n", i + 1, reg_rule->start_freq, reg_rule->end_freq, max_bw, reg_rule->ant_gain, reg_rule->reg_power, - tmp_regd->reg_rules[i].dfs_cac_ms, + new_regd->reg_rules[i].dfs_cac_ms, flags, reg_rule->psd_flag, reg_rule->psd_eirp); } else { ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", i + 1, reg_rule->start_freq, reg_rule->end_freq, max_bw, reg_rule->ant_gain, reg_rule->reg_power, - tmp_regd->reg_rules[i].dfs_cac_ms, + new_regd->reg_rules[i].dfs_cac_ms, flags); } } - tmp_regd->n_reg_rules = i; - - if (intersect) { - default_regd = ab->default_regd[reg_info->phy_id]; - - /* Get a new regd by intersecting the received regd with - * our default regd. - */ - new_regd = ath12k_regd_intersect(default_regd, tmp_regd); - kfree(tmp_regd); - if (!new_regd) { - ath12k_warn(ab, "Unable to create intersected regdomain\n"); - goto ret; - } - } else { - new_regd = tmp_regd; - } - + new_regd->n_reg_rules = i; ret: return new_regd; } @@ -879,6 +781,105 @@ void ath12k_regd_update_work(struct work_struct *work) } } +void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info) +{ + u8 i, j; + + if (!reg_info) + return; + + kfree(reg_info->reg_rules_2g_ptr); + kfree(reg_info->reg_rules_5g_ptr); + + if (reg_info->is_ext_reg_event) { + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { + kfree(reg_info->reg_rules_6g_ap_ptr[i]); + + for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++) + kfree(reg_info->reg_rules_6g_client_ptr[i][j]); + } + } +} + +enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info) +{ + int pdev_idx = reg_info->phy_id; + + if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { + /* In case of failure to set the requested country, + * firmware retains the current regd. We print a failure info + * and return from here. + */ + ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n"); + return ATH12K_REG_STATUS_DROP; + } + + if (pdev_idx >= ab->num_radios) { + /* Process the event for phy0 only if single_pdev_only + * is true. If pdev_idx is valid but not 0, discard the + * event. Otherwise, it goes to fallback. + */ + if (ab->hw_params->single_pdev_only && + pdev_idx < ab->hw_params->num_rxdma_per_pdev) + return ATH12K_REG_STATUS_DROP; + else + return ATH12K_REG_STATUS_FALLBACK; + } + + /* Avoid multiple overwrites to default regd, during core + * stop-start after mac registration. 
+ */ + if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && + !memcmp(ab->default_regd[pdev_idx]->alpha2, + reg_info->alpha2, 2)) + return ATH12K_REG_STATUS_DROP; + + return ATH12K_REG_STATUS_VALID; +} + +int ath12k_reg_handle_chan_list(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info, + enum wmi_vdev_type vdev_type, + enum ieee80211_ap_reg_power power_type) +{ + struct ieee80211_regdomain *regd = NULL; + int pdev_idx = reg_info->phy_id; + struct ath12k *ar; + + regd = ath12k_reg_build_regd(ab, reg_info, vdev_type, power_type); + if (!regd) + return -EINVAL; + + spin_lock_bh(&ab->base_lock); + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { + /* Once mac is registered, ar is valid and all CC events from + * firmware is considered to be received due to user requests + * currently. + * Free previously built regd before assigning the newly + * generated regd to ar. NULL pointer handling will be + * taken care by kfree itself. + */ + ar = ab->pdevs[pdev_idx].ar; + kfree(ab->new_regd[pdev_idx]); + ab->new_regd[pdev_idx] = regd; + queue_work(ab->workqueue, &ar->regd_update_work); + } else { + /* Multiple events for the same *ar is not expected. But we + * can still clear any previously stored default_regd if we + * are receiving this event for the same radio by mistake. + * NULL pointer handling will be taken care by kfree itself. + */ + kfree(ab->default_regd[pdev_idx]); + /* This regd would be applied during mac registration */ + ab->default_regd[pdev_idx] = regd; + } + ab->dfs_region = reg_info->dfs_region; + spin_unlock_bh(&ab->base_lock); + + return 0; +} + void ath12k_reg_init(struct ieee80211_hw *hw) { hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; @@ -891,6 +892,12 @@ void ath12k_reg_free(struct ath12k_base *ab) int i; mutex_lock(&ab->core_lock); + for (i = 0; i < MAX_RADIOS; i++) { + ath12k_reg_reset_reg_info(ab->reg_info[i]); + kfree(ab->reg_info[i]); + ab->reg_info[i] = NULL; + } + for (i = 0; i < ab->hw_params->max_radios; i++) { kfree(ab->default_regd[i]); kfree(ab->new_regd[i]); diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h index b1eb584ff34c..8af8e9ba462e 100644 --- a/drivers/net/wireless/ath/ath12k/reg.h +++ b/drivers/net/wireless/ath/ath12k/reg.h @@ -92,13 +92,29 @@ enum ath12k_reg_phy_bitmap { ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6), }; +enum ath12k_reg_status { + ATH12K_REG_STATUS_VALID, + ATH12K_REG_STATUS_DROP, + ATH12K_REG_STATUS_FALLBACK, +}; + void ath12k_reg_init(struct ieee80211_hw *hw); void ath12k_reg_free(struct ath12k_base *ab); void ath12k_regd_update_work(struct work_struct *work); struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab, struct ath12k_reg_info *reg_info, - bool intersect); + enum wmi_vdev_type vdev_type, + enum ieee80211_ap_reg_power power_type); int ath12k_regd_update(struct ath12k *ar, bool init); int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait); +void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info); +int ath12k_reg_handle_chan_list(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info, + enum wmi_vdev_type vdev_type, + enum ieee80211_ap_reg_power power_type); +enum wmi_reg_6g_ap_type +ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type); +enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab, + struct ath12k_reg_info *reg_info); #endif diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index ea303dca38b5..60e2444fe08c 100644 --- 
a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -2187,9 +2187,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; - u32 peer_legacy_rates_align; - u32 peer_ht_rates_align; + u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay; + u32 peer_ht_rates_align, eml_trans_timeout; int i, ret, len; + u16 eml_cap; __le32 v; peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, @@ -2361,6 +2362,24 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx); ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id); ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id); + + eml_cap = arg->ml.eml_cap; + if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) { + /* Padding delay */ + eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap); + ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay); + /* Transition delay */ + eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap); + ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay); + /* Transition timeout */ + eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap); + ml_params->emlsr_trans_timeout_us = + cpu_to_le32(eml_trans_timeout); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u", + arg->peer_mac, eml_pad_delay, eml_trans_delay, + eml_trans_timeout); + } + ptr += sizeof(*ml_params); skip_ml_params: @@ -2380,6 +2399,10 @@ skip_ml_params: ptr += sizeof(*eht_mcs); } + /* Update MCS15 capability */ + if (arg->eht_disable_mcs15) + cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE); + tlv = ptr; len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0; /* fill ML Partner links */ @@ -2420,7 +2443,7 @@ skip_ml_params: send: ath12k_dbg(ar->ab, ATH12K_DBG_WMI, - "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n", + "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n", cmd->vdev_id, cmd->peer_associd, arg->peer_mac, cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, cmd->peer_listen_intval, cmd->peer_ht_caps, @@ -2433,7 +2456,7 @@ send: cmd->peer_bw_rxnss_override, cmd->peer_flags_ext, cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1], cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1], - cmd->peer_eht_cap_phy[2]); + cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops); ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); if (ret) { @@ -4733,6 +4756,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab, return 0; err: + kfree(svc_rdy_ext.mac_phy_caps); ath12k_wmi_free_dbring_caps(ab); return ret; } @@ -6101,6 +6125,7 @@ static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *s pdev = &ab->pdevs[i]; ar = pdev->ar; ar->state_11d = ATH12K_11D_IDLE; + ar->ah->regd_updated = false; complete(&ar->completed_11d_scan); } @@ -6115,24 +6140,11 @@ static 
void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, dev_kfree_skb(skb); } -static bool ath12k_reg_is_world_alpha(char *alpha) -{ - if (alpha[0] == '0' && alpha[1] == '0') - return true; - - if (alpha[0] == 'n' && alpha[1] == 'a') - return true; - - return false; -} - static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) { - struct ath12k_reg_info *reg_info = NULL; - struct ieee80211_regdomain *regd = NULL; - bool intersect = false; - int ret = 0, pdev_idx, i, j; - struct ath12k *ar; + struct ath12k_reg_info *reg_info; + u8 pdev_idx; + int ret; reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); if (!reg_info) { @@ -6141,86 +6153,52 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk } ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); - if (ret) { ath12k_warn(ab, "failed to extract regulatory info from received event\n"); - goto fallback; + goto mem_free; } - if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { - /* In case of failure to set the requested ctry, - * fw retains the current regd. We print a failure info - * and return from here. + ret = ath12k_reg_validate_reg_info(ab, reg_info); + if (ret == ATH12K_REG_STATUS_FALLBACK) { + ath12k_warn(ab, "failed to validate reg info %d\n", ret); + /* firmware has successfully switches to new regd but host can not + * continue, so free reginfo and fallback to old regd */ - ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n"); + goto mem_free; + } else if (ret == ATH12K_REG_STATUS_DROP) { + /* reg info is valid but we will not store it and + * not going to create new regd for it + */ + ret = ATH12K_REG_STATUS_VALID; goto mem_free; } + /* free old reg_info if it exist */ pdev_idx = reg_info->phy_id; - - if (pdev_idx >= ab->num_radios) { - /* Process the event for phy0 only if single_pdev_only - * is true. If pdev_idx is valid but not 0, discard the - * event. Otherwise, it goes to fallback. - */ - if (ab->hw_params->single_pdev_only && - pdev_idx < ab->hw_params->num_rxdma_per_pdev) - goto mem_free; - else - goto fallback; + if (ab->reg_info[pdev_idx]) { + ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]); + kfree(ab->reg_info[pdev_idx]); } - - /* Avoid multiple overwrites to default regd, during core - * stop-start after mac registration. + /* reg_info is valid, we store it for later use + * even below regd build failed */ - if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && - !memcmp(ab->default_regd[pdev_idx]->alpha2, - reg_info->alpha2, 2)) - goto mem_free; + ab->reg_info[pdev_idx] = reg_info; - /* Intersect new rules with default regd if a new country setting was - * requested, i.e a default regd was already set during initialization - * and the regd coming from this event has a valid country info. - */ - if (ab->default_regd[pdev_idx] && - !ath12k_reg_is_world_alpha((char *) - ab->default_regd[pdev_idx]->alpha2) && - !ath12k_reg_is_world_alpha((char *)reg_info->alpha2)) - intersect = true; - - regd = ath12k_reg_build_regd(ab, reg_info, intersect); - if (!regd) { - ath12k_warn(ab, "failed to build regd from reg_info\n"); + ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC, + IEEE80211_REG_UNSET_AP); + if (ret) { + ath12k_warn(ab, "failed to handle chan list %d\n", ret); goto fallback; } - spin_lock(&ab->base_lock); - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { - /* Once mac is registered, ar is valid and all CC events from - * fw is considered to be received due to user requests - * currently. 
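The rewritten channel-list event handler separates validation from regdomain construction: ath12k_reg_validate_reg_info() classifies the event as VALID, DROP (keep running, but do not build or store a new regd) or FALLBACK (revert to the previous country), and only VALID events are stored in ab->reg_info[] and handed to ath12k_reg_handle_chan_list(), which builds the regd and parks it in new_regd[] or default_regd[] depending on whether the MAC is already registered. A compact sketch of that control flow, with stubbed-out helpers rather than the real driver functions:

#include <stdio.h>

enum reg_status { REG_VALID, REG_DROP, REG_FALLBACK }; /* ath12k_reg_status stand-ins */

static enum reg_status validate_reg_info(void) { return REG_VALID; } /* stub */
static int handle_chan_list(void)              { return 0; }         /* builds + stores regd */

static int chan_list_event_sketch(void)
{
	enum reg_status st = validate_reg_info();

	if (st == REG_FALLBACK) {
		puts("validation failed: free reg_info, fall back to previous country");
		return -1;
	}
	if (st == REG_DROP) {
		puts("event ignored (e.g. duplicate default regd): free reg_info");
		return 0;
	}

	/* VALID: keep reg_info so later vdev/power-type specific rebuilds can reuse it */
	puts("store reg_info for this pdev");
	if (handle_chan_list()) {
		puts("regd build failed: fall back");
		return -1;
	}
	return 0;
}

int main(void) { return chan_list_event_sketch() ? 1 : 0; }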
- * Free previously built regd before assigning the newly - * generated regd to ar. NULL pointer handling will be - * taken care by kfree itself. - */ - ar = ab->pdevs[pdev_idx].ar; - kfree(ab->new_regd[pdev_idx]); - ab->new_regd[pdev_idx] = regd; - queue_work(ab->workqueue, &ar->regd_update_work); - } else { - /* Multiple events for the same *ar is not expected. But we - * can still clear any previously stored default_regd if we - * are receiving this event for the same radio by mistake. - * NULL pointer handling will be taken care by kfree itself. - */ - kfree(ab->default_regd[pdev_idx]); - /* This regd would be applied during mac registration */ - ab->default_regd[pdev_idx] = regd; - } - ab->dfs_region = reg_info->dfs_region; - spin_unlock(&ab->base_lock); + goto out; - goto mem_free; +mem_free: + ath12k_reg_reset_reg_info(reg_info); + kfree(reg_info); + + if (ret == ATH12K_REG_STATUS_VALID) + return ret; fallback: /* Fallback to older reg (by sending previous country setting @@ -6232,20 +6210,8 @@ fallback: */ /* TODO: This is rare, but still should also be handled */ WARN_ON(1); -mem_free: - if (reg_info) { - kfree(reg_info->reg_rules_2g_ptr); - kfree(reg_info->reg_rules_5g_ptr); - if (reg_info->is_ext_reg_event) { - for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) - kfree(reg_info->reg_rules_6g_ap_ptr[i]); - - for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) - for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) - kfree(reg_info->reg_rules_6g_client_ptr[j][i]); - } - kfree(reg_info); - } + +out: return ret; } @@ -6401,13 +6367,14 @@ static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff ar->last_wmi_vdev_start_status = 0; status = le32_to_cpu(vdev_start_resp.status); - if (WARN_ON_ONCE(status)) { ath12k_warn(ab, "vdev start resp error status %d (%s)\n", status, ath12k_wmi_vdev_resp_print(status)); ar->last_wmi_vdev_start_status = status; } + ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power); + complete(&ar->vdev_setup_done); rcu_read_unlock(); @@ -9878,3 +9845,69 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar) return 0; } + +bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar) +{ + return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, + ar->ab->wmi_ab.svc_map) && ar->supports_6ghz; +} + +int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar, + u32 vdev_id, + struct ath12k_reg_tpc_power_info *param) +{ + struct wmi_vdev_set_tpc_power_cmd *cmd; + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_vdev_ch_power_params *ch; + int i, ret, len, array_len; + struct sk_buff *skb; + struct wmi_tlv *tlv; + u8 *ptr; + + array_len = sizeof(*ch) * param->num_pwr_levels; + len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + ptr = skb->data; + + cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->psd_power = cpu_to_le32(param->is_psd_power); + cmd->eirp_power = cpu_to_le32(param->eirp_power); + cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", + vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); + + ptr += sizeof(*cmd); + tlv = (struct wmi_tlv *)ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len); + + ptr += TLV_HDR_SIZE; + ch = (struct wmi_vdev_ch_power_params *)ptr; 
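The new TPC power command above sizes its buffer as one fixed command header, one TLV array header, and one per-channel power entry per power level. The short program below just reproduces that arithmetic with placeholder struct definitions (the real WMI structures are packed little-endian fields; only the sizing pattern is the point here).

#include <stdint.h>
#include <stdio.h>

struct tlv_hdr  { uint32_t tag_len; };
struct ch_power { uint32_t tlv_header, chan_cfreq, tx_power; };
struct tpc_cmd  { uint32_t tlv_header, vdev_id, psd_power, eirp_power, power_type_6ghz; };

int main(void)
{
	unsigned int num_pwr_levels = 4;	/* e.g. 20/40/80/160 MHz steps */
	size_t array_len = num_pwr_levels * sizeof(struct ch_power);
	size_t len = sizeof(struct tpc_cmd) + sizeof(struct tlv_hdr) + array_len;

	printf("cmd %zu + tlv hdr %zu + %u * %zu = %zu bytes\n",
	       sizeof(struct tpc_cmd), sizeof(struct tlv_hdr),
	       num_pwr_levels, sizeof(struct ch_power), len);
	return 0;
}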
+ + for (i = 0; i < param->num_pwr_levels; i++, ch++) { + ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO, + sizeof(*ch)); + ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq); + ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n", + ch->chan_cfreq, ch->tx_power); + } + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n"); + dev_kfree_skb(skb); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 80fdbc566518..ac18f75e0449 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -26,6 +26,7 @@ struct ath12k_base; struct ath12k; struct ath12k_link_vif; struct ath12k_fw_stats; +struct ath12k_reg_tpc_power_info; /* There is no signed version of __le32, so for a temporary solution come * up with our own version. The idea is from fs/ntfs/endian.h. @@ -386,6 +387,22 @@ enum wmi_tlv_cmd_id { WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID, WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID, WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID, + WMI_VDEV_SET_ARP_STAT_CMDID, + WMI_VDEV_GET_ARP_STAT_CMDID, + WMI_VDEV_GET_TX_POWER_CMDID, + WMI_VDEV_LIMIT_OFFCHAN_CMDID, + WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID, + WMI_VDEV_CHAINMASK_CONFIG_CMDID, + WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID, + WMI_VDEV_GET_MWS_COEX_INFO_CMDID, + WMI_VDEV_DELETE_ALL_PEER_CMDID, + WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID, + WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID, + WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID, + WMI_VDEV_SET_PCL_CMDID, + WMI_VDEV_GET_BIG_DATA_CMDID, + WMI_VDEV_GET_BIG_DATA_P2_CMDID, + WMI_VDEV_SET_TPC_POWER_CMDID, WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER), WMI_PEER_DELETE_CMDID, WMI_PEER_FLUSH_TIDS_CMDID, @@ -1955,6 +1972,8 @@ enum wmi_tlv_tag { WMI_TAG_TPC_STATS_REG_PWR_ALLOWED, WMI_TAG_TPC_STATS_RATES_ARRAY, WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT, + WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5, + WMI_TAG_VDEV_CH_POWER_INFO, WMI_TAG_EHT_RATE_SET = 0x3C4, WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, WMI_TAG_MLO_TX_SEND_PARAMS, @@ -2201,6 +2220,8 @@ enum wmi_tlv_service { WMI_MAX_EXT_SERVICE = 256, + WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280, + WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281, WMI_TLV_SERVICE_11BE = 289, @@ -3755,6 +3776,7 @@ struct peer_assoc_mlo_params { u32 ieee_link_id; u8 num_partner_links; struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS]; + u16 eml_cap; }; struct wmi_rate_set_arg { @@ -3833,6 +3855,7 @@ struct ath12k_wmi_peer_assoc_arg { u32 punct_bitmap; bool is_assoc; struct peer_assoc_mlo_params ml; + bool eht_disable_mcs15; }; #define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0) @@ -4162,6 +4185,7 @@ struct wmi_vdev_start_resp_event { }; __le32 cfgd_tx_streams; __le32 cfgd_rx_streams; + __le32 max_allowed_tx_power; } __packed; /* VDEV start response status codes */ @@ -4507,6 +4531,7 @@ struct ath12k_wmi_target_cap_arg { }; enum wmi_vdev_type { + WMI_VDEV_TYPE_UNSPEC = 0, WMI_VDEV_TYPE_AP = 1, WMI_VDEV_TYPE_STA = 2, WMI_VDEV_TYPE_IBSS = 3, @@ -5937,6 +5962,41 @@ struct wmi_tpc_stats_arg { struct wmi_tpc_ctl_pwr_table_arg ctl_array; }; +struct wmi_vdev_ch_power_params { + __le32 tlv_header; + + /* Channel center frequency (MHz) */ + __le32 chan_cfreq; + + /* Unit: dBm, either PSD/EIRP power for this frequency or + * incremental for non-PSD BW + */ + __le32 tx_power; +} __packed; + +struct 
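ath12k_wmi_supports_6ghz_cc_ext() introduced above only tests one bit in the firmware service bitmap (WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT is bit 281). The toy program below shows how such a high bit index maps onto a word and bit position in a plain unsigned-long bitmap; the kernel's test_bit() does this for the driver, so this is purely illustrative.

#include <limits.h>
#include <stdio.h>

#define SVC_BIT 281	/* WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT */
#define BITS_PER_LONG_ (sizeof(unsigned long) * CHAR_BIT)

static int bitmap_test(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG_] >> (bit % BITS_PER_LONG_)) & 1UL;
}

int main(void)
{
	unsigned long svc_map[16] = { 0 };	/* 512+ bits on any platform */

	svc_map[SVC_BIT / BITS_PER_LONG_] |= 1UL << (SVC_BIT % BITS_PER_LONG_);
	printf("service %d supported: %d\n", SVC_BIT,
	       bitmap_test(svc_map, SVC_BIT));
	return 0;
}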
wmi_vdev_set_tpc_power_cmd { + __le32 tlv_header; + __le32 vdev_id; + + /* Value: 0 or 1, is PSD power or not */ + __le32 psd_power; + + /* Maximum EIRP power (dBm units), valid only if power is PSD */ + __le32 eirp_power; + + /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */ + __le32 power_type_6ghz; + + /* This fixed_param TLV is followed by the below TLVs: + * num_pwr_levels of wmi_vdev_ch_power_info + * For PSD power, it is the PSD/EIRP power of the frequency (20 MHz chunks). + * For non-PSD power, the power values are for 20, 40, and till + * BSS BW power levels. + * The num_pwr_levels will be checked by sw how many elements present + * in the variable-length array. + */ +} __packed; + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -6131,5 +6191,9 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar); void ath12k_wmi_fw_stats_dump(struct ath12k *ar, struct ath12k_fw_stats *fw_stats, u32 stats_id, char *buf); +bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar); +int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar, + u32 vdev_id, + struct ath12k_reg_tpc_power_info *param); #endif diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index d4805e02b927..49b7ab26c477 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -74,7 +74,6 @@ static int ath_ahb_probe(struct platform_device *pdev) void __iomem *mem; struct ath_softc *sc; struct ieee80211_hw *hw; - struct resource *res; const struct platform_device_id *id = platform_get_device_id(pdev); int irq; int ret = 0; @@ -86,16 +85,10 @@ static int ath_ahb_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "no memory resource found\n"); - return -ENXIO; - } - - mem = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (mem == NULL) { + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) { dev_err(&pdev->dev, "ioremap failed\n"); - return -ENOMEM; + return PTR_ERR(mem); } irq = platform_get_irq(pdev, 0); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 547634f82183..81fa7cbad892 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, struct ath_common *common = ath9k_hw_common(priv->ah); int slot; + if (!priv->cur_beacon_conf.enable_beacon) + return; + if (swba->beacon_pending != 0) { priv->beacon.bmisscnt++; if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) { diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 0226c31a6cae..b7717f9e1e9b 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -366,8 +366,7 @@ static void carl9170_tx_shift_bm(struct ar9170 *ar, if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS)) return; - if (!bitmap_empty(tid_info->bitmap, off)) - off = find_first_bit(tid_info->bitmap, off); + off = min(off, find_first_bit(tid_info->bitmap, off)); tid_info->bsn += off; tid_info->bsn &= 0x0fff; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 33ccd0b248d4..fff8b7f8abc5 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -617,8 +617,7 
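The carl9170 change above folds the old bitmap_empty() test plus conditional find_first_bit() into a single min(): find_first_bit(map, size) returns size when no bit below size is set, so taking the minimum gives exactly the old result. The toy program below demonstrates the equivalence with a simplified single-word bitmap (not the kernel implementation).

#include <stdio.h>

/* toy single-word find_first_bit(): returns size if no bit below it is set */
static unsigned int ffb(unsigned long map, unsigned int size)
{
	for (unsigned int i = 0; i < size; i++)
		if (map & (1UL << i))
			return i;
	return size;
}

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long bitmap = 0x28;	/* bits 3 and 5 set */
	unsigned int off = 4;

	/* old: if any bit below off is set, off = first set bit */
	unsigned int old_off = (ffb(bitmap, off) != off) ? ffb(bitmap, off) : off;
	/* new: off = min(off, find_first_bit(bitmap, off)) */
	unsigned int new_off = MIN(off, ffb(bitmap, off));

	printf("old %u, new %u\n", old_off, new_off);	/* both 3 */
	return 0;
}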
@@ static inline bool wil_need_txstat(struct sk_buff *skb) { const u8 *da = wil_skb_get_da(skb); - return is_unicast_ether_addr(da) && skb->sk && - sock_flag(skb->sk, SOCK_WIFI_STATUS); + return is_unicast_ether_addr(da) && sk_requests_wifi_status(skb->sk); } static inline void wil_consume_skb(struct sk_buff *skb, bool acked) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 4b70845e1a26..dc2383faddd1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -74,7 +74,6 @@ #define VNDR_IE_HDR_SIZE 12 #define VNDR_IE_PARSE_LIMIT 5 -#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */ #define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */ #define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 @@ -1945,17 +1944,22 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev, struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); struct brcmf_pub *drvr = ifp->drvr; struct brcmf_cfg80211_security *sec; - s32 val = 0; - s32 err = 0; + s32 val; + s32 err; - if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) + if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) { val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; - else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) - val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; - else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) + } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) { + if (drvr->bus_if->fwvid == BRCMF_FWVENDOR_CYW && + sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE) + val = WPA3_AUTH_SAE_PSK; + else + val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; + } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) { val = WPA3_AUTH_SAE_PSK; - else + } else { val = WPA_AUTH_DISABLED; + } brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val); if (err) { @@ -2163,28 +2167,25 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_SAE: val = WPA3_AUTH_SAE_PSK; - if (sme->crypto.sae_pwd) { - brcmf_dbg(INFO, "using SAE offload\n"); - profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; - } break; case WLAN_AKM_SUITE_FT_OVER_SAE: val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT; profile->is_ft = true; - if (sme->crypto.sae_pwd) { - brcmf_dbg(INFO, "using SAE offload\n"); - profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; - } break; default: bphy_err(drvr, "invalid akm suite (%d)\n", sme->crypto.akm_suites[0]); return -EINVAL; } + if (sme->crypto.sae_pwd) { + profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; + } } if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X) brcmf_dbg(INFO, "using 1X offload\n"); + if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) + brcmf_dbg(INFO, "using SAE offload\n"); if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) goto skip_mfp_config; @@ -2221,7 +2222,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp); skip_mfp_config: - brcmf_dbg(CONN, "setting wpa_auth to %d\n", val); + brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); if (err) { bphy_err(drvr, "could not set wpa_auth (%d)\n", err); @@ -5509,7 +5510,7 @@ brcmf_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy, } -static int +int brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct 
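The brcmf_set_wpa_version() change above routes a WPA2 connection whose first AKM is SAE to WPA3_AUTH_SAE_PSK on Cypress firmware, instead of the generic WPA2 PSK/unspecified value. The sketch below restates only that selection logic; the *_AUTH_* numbers are placeholders, not the firmware's real bit definitions.

#include <stdio.h>

/* placeholder values, only the selection logic matters here */
#define AUTH_DISABLED        0x0
#define AUTH_WPA_PSK_UNSPEC  0x1
#define AUTH_WPA2_PSK_UNSPEC 0x2
#define AUTH_WPA3_SAE_PSK    0x4

enum wpa_ver { WPA_V1 = 1, WPA_V2 = 2, WPA_V3 = 4 };

static unsigned int pick_wpa_auth(unsigned int versions, int akm_is_sae, int is_cyw)
{
	if (versions & WPA_V1)
		return AUTH_WPA_PSK_UNSPEC;
	if (versions & WPA_V2)
		return (is_cyw && akm_is_sae) ? AUTH_WPA3_SAE_PSK
					      : AUTH_WPA2_PSK_UNSPEC;
	if (versions & WPA_V3)
		return AUTH_WPA3_SAE_PSK;
	return AUTH_DISABLED;
}

int main(void)
{
	printf("wpa2+sae on cyw  -> 0x%x\n", pick_wpa_auth(WPA_V2, 1, 1));
	printf("wpa2+sae generic -> 0x%x\n", pick_wpa_auth(WPA_V2, 1, 0));
	return 0;
}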
wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { @@ -5616,6 +5617,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, exit: return err; } +BRCMF_EXPORT_SYMBOL_GPL(brcmf_cfg80211_mgmt_tx); static int brcmf_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy, struct net_device *ndev, @@ -6009,6 +6011,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, vif->wdev.wiphy = cfg->wiphy; vif->wdev.iftype = type; + init_completion(&vif->mgmt_tx); brcmf_init_prof(&vif->profile); @@ -6760,6 +6763,8 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg) brcmf_fweh_register(cfg->pub, BRCMF_E_PSK_SUP, brcmf_notify_connect_status); brcmf_fweh_register(cfg->pub, BRCMF_E_RSSI, brcmf_notify_rssi); + + brcmf_fwvid_register_event_handlers(cfg->pub); } static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) @@ -7346,6 +7351,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { @@ -7653,6 +7659,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD_AP); } + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE_EXT)) + wiphy->features |= NL80211_FEATURE_SAE; wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->max_remain_on_channel_duration = 5000; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 2abae8894614..b83485ec7b87 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -13,6 +13,8 @@ #include "fwil_types.h" #include "p2p.h" +#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */ + #define BRCMF_SCAN_IE_LEN_MAX 2048 #define WL_NUM_SCAN_MAX 10 @@ -142,6 +144,21 @@ enum brcmf_profile_fwauth { }; /** + * enum brcmf_mgmt_tx_status - mgmt frame tx status + * + * @BRCMF_MGMT_TX_ACK: mgmt frame acked + * @BRCMF_MGMT_TX_NOACK: mgmt frame not acked + * @BRCMF_MGMT_TX_OFF_CHAN_COMPLETED: off-channel complete + * @BRCMF_MGMT_TX_SEND_FRAME: mgmt frame tx is in progres + */ +enum brcmf_mgmt_tx_status { + BRCMF_MGMT_TX_ACK, + BRCMF_MGMT_TX_NOACK, + BRCMF_MGMT_TX_OFF_CHAN_COMPLETED, + BRCMF_MGMT_TX_SEND_FRAME +}; + +/** * struct brcmf_cfg80211_profile - profile information. * * @bssid: bssid of joined/joining ibss. @@ -211,6 +228,9 @@ struct vif_saved_ie { * @profile: profile information. * @sme_state: SME state using enum brcmf_vif_status bits. * @list: linked list. + * @mgmt_tx: completion for management frame transmit. + * @mgmt_tx_status: status of last management frame sent to firmware. + * @mgmt_tx_id: * @mgmt_rx_reg: registered rx mgmt frame types. * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P). 
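The brcmf_txrx_stypes update above enables RX reporting of authentication frames by adding BIT(IEEE80211_STYPE_AUTH >> 4): management subtypes occupy bits 4-7 of the frame-control field, so shifting right by four turns each subtype constant into a 0-15 bit index for the registration bitmap. A quick check of that arithmetic, using the standard 802.11 subtype values:

#include <stdio.h>

#define STYPE_PROBE_REQ 0x0040
#define STYPE_AUTH      0x00b0
#define STYPE_ACTION    0x00d0

int main(void)
{
	printf("PROBE_REQ -> bit %d\n", STYPE_PROBE_REQ >> 4);	/* 4 */
	printf("AUTH      -> bit %d\n", STYPE_AUTH >> 4);	/* 11 */
	printf("ACTION    -> bit %d\n", STYPE_ACTION >> 4);	/* 13 */
	return 0;
}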
* @cqm_rssi_low: Lower RSSI limit for CQM monitoring @@ -224,6 +244,9 @@ struct brcmf_cfg80211_vif { unsigned long sme_state; struct vif_saved_ie saved_ie; struct list_head list; + struct completion mgmt_tx; + unsigned long mgmt_tx_status; + u32 mgmt_tx_id; u16 mgmt_rx_reg; bool mbss; int is_11d; @@ -468,5 +491,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg); void brcmf_cfg80211_free_netdev(struct net_device *ndev); int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags); +int brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie); #endif /* BRCMFMAC_CFG80211_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index f26e4679e4ff..75f101622db1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -491,6 +491,7 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) trace_brcmf_dbg(level, func, &vaf); va_end(args); } +BRCMF_EXPORT_SYMBOL_GPL(__brcmf_dbg); #endif static void brcmf_mp_attach(void) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 3d63010ae079..04f41c09deca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1363,6 +1363,8 @@ int brcmf_attach(struct device *dev) brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG, brcmf_psm_watchdog_notify); + brcmf_fwvid_get_cfg80211_ops(drvr); + ret = brcmf_bus_started(drvr, drvr->ops); if (ret != 0) { bphy_err(drvr, "dongle is not responding: err=%d\n", ret); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c index 9a4837881486..c9537fb597ce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c @@ -8,11 +8,21 @@ #include <bus.h> #include <fwvid.h> #include <fwil.h> +#include <fweh.h> #include "vops.h" +#include "fwil_types.h" +/* event definitions */ +#define BRCMF_CYW_E_EXT_AUTH_REQ 187 +#define BRCMF_CYW_E_EXT_AUTH_FRAME_RX 188 +#define BRCMF_CYW_E_MGMT_FRAME_TXS 189 +#define BRCMF_CYW_E_MGMT_FRAME_TXS_OC 190 #define BRCMF_CYW_E_LAST 197 +#define MGMT_AUTH_FRAME_DWELL_TIME 4000 +#define MGMT_AUTH_FRAME_WAIT_TIME (MGMT_AUTH_FRAME_DWELL_TIME + 100) + static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto) { @@ -39,6 +49,19 @@ static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp, return err; } +static const struct brcmf_fweh_event_map brcmf_cyw_event_map = { + .items = { + { BRCMF_E_EXT_AUTH_REQ, BRCMF_CYW_E_EXT_AUTH_REQ }, + { BRCMF_E_EXT_AUTH_FRAME_RX, BRCMF_CYW_E_EXT_AUTH_FRAME_RX }, + { BRCMF_E_MGMT_FRAME_TXSTATUS, BRCMF_CYW_E_MGMT_FRAME_TXS }, + { + BRCMF_E_MGMT_FRAME_OFFCHAN_DONE, + BRCMF_CYW_E_MGMT_FRAME_TXS_OC + }, + }, + .n_items = 4 +}; + static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr) { struct brcmf_fweh_info *fweh; @@ -49,11 +72,296 @@ static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr) return -ENOMEM; fweh->num_event_codes = BRCMF_CYW_E_LAST; + fweh->event_map = &brcmf_cyw_event_map; drvr->fweh = fweh; return 0; } +static int brcmf_cyw_activate_events(struct brcmf_if *ifp) +{ + struct brcmf_fweh_info *fweh = ifp->drvr->fweh; + struct brcmf_eventmsgs_ext 
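The brcmf_cyw_event_map introduced above translates Cypress firmware event numbers (187 to 190) into the driver's abstract BRCMF_E_* codes so that common code never deals with vendor-specific numbering. Below is a minimal lookup in the same spirit; the number/name pairs are copied from the table above and everything else is stubbed.

#include <stdio.h>

struct ev_map { int fwcode; const char *abstract; };

static const struct ev_map cyw_map[] = {
	{ 187, "EXT_AUTH_REQ" },
	{ 188, "EXT_AUTH_FRAME_RX" },
	{ 189, "MGMT_FRAME_TXSTATUS" },
	{ 190, "MGMT_FRAME_OFFCHAN_DONE" },
};

static const char *map_event(int fwcode)
{
	for (unsigned int i = 0; i < sizeof(cyw_map) / sizeof(cyw_map[0]); i++)
		if (cyw_map[i].fwcode == fwcode)
			return cyw_map[i].abstract;
	return "unmapped";
}

int main(void)
{
	printf("fw event 189 -> %s\n", map_event(189));
	return 0;
}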
*eventmask_msg; + u32 msglen; + int err; + + msglen = sizeof(*eventmask_msg) + fweh->event_mask_len; + eventmask_msg = kzalloc(msglen, GFP_KERNEL); + if (!eventmask_msg) + return -ENOMEM; + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = CYW_EVENTMSGS_SET_MASK; + eventmask_msg->len = fweh->event_mask_len; + memcpy(eventmask_msg->mask, fweh->event_mask, fweh->event_mask_len); + + err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext", eventmask_msg, + msglen); + kfree(eventmask_msg); + return err; +} + +static +int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct ieee80211_channel *chan = params->chan; + struct brcmf_pub *drvr = cfg->pub; + const u8 *buf = params->buf; + size_t len = params->len; + const struct ieee80211_mgmt *mgmt; + struct brcmf_cfg80211_vif *vif; + s32 err = 0; + bool ack = false; + s32 chan_nr; + u32 freq; + struct brcmf_mf_params_le *mf_params; + u32 mf_params_len; + s32 ready; + + brcmf_dbg(TRACE, "Enter\n"); + + mgmt = (const struct ieee80211_mgmt *)buf; + + if (!ieee80211_is_auth(mgmt->frame_control)) + return brcmf_cfg80211_mgmt_tx(wiphy, wdev, params, cookie); + + *cookie = 0; + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + + reinit_completion(&vif->mgmt_tx); + clear_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status); + clear_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status); + clear_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED, + &vif->mgmt_tx_status); + mf_params_len = offsetof(struct brcmf_mf_params_le, data) + + (len - DOT11_MGMT_HDR_LEN); + mf_params = kzalloc(mf_params_len, GFP_KERNEL); + if (!mf_params) + return -ENOMEM; + + mf_params->dwell_time = cpu_to_le32(MGMT_AUTH_FRAME_DWELL_TIME); + mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); + mf_params->frame_control = mgmt->frame_control; + + if (chan) + freq = chan->center_freq; + else + brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL, + &freq); + chan_nr = ieee80211_frequency_to_channel(freq); + mf_params->channel = cpu_to_le16(chan_nr); + memcpy(&mf_params->da[0], &mgmt->da[0], ETH_ALEN); + memcpy(&mf_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); + mf_params->packet_id = cpu_to_le32(*cookie); + memcpy(mf_params->data, &buf[DOT11_MGMT_HDR_LEN], + le16_to_cpu(mf_params->len)); + + brcmf_dbg(TRACE, "Auth frame, cookie=%d, fc=%04x, len=%d, channel=%d\n", + le32_to_cpu(mf_params->packet_id), + le16_to_cpu(mf_params->frame_control), + le16_to_cpu(mf_params->len), chan_nr); + + vif->mgmt_tx_id = le32_to_cpu(mf_params->packet_id); + set_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status); + + err = brcmf_fil_bsscfg_data_set(vif->ifp, "mgmt_frame", + mf_params, mf_params_len); + if (err) { + bphy_err(drvr, "Failed to send Auth frame: err=%d\n", + err); + goto tx_status; + } + + ready = wait_for_completion_timeout(&vif->mgmt_tx, + MGMT_AUTH_FRAME_WAIT_TIME); + if (test_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status)) { + brcmf_dbg(TRACE, "TX Auth frame operation is success\n"); + ack = true; + } else { + bphy_err(drvr, "TX Auth frame operation is %s: status=%ld)\n", + ready ? 
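brcmf_cyw_mgmt_tx() above sizes the mgmt_frame iovar buffer as offsetof(struct brcmf_mf_params_le, data) plus the frame payload that follows the 24-byte 802.11 management header. The snippet below illustrates that sizing pattern with a placeholder struct; the point is that offsetof() of the flexible array member, rather than sizeof() of the struct, is what feeds the allocation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MGMT_HDR_LEN 24	/* 802.11 management header */

struct mf_params {	/* placeholder layout, not the firmware ABI */
	uint32_t version, dwell_time;
	uint16_t len, frame_control, channel;
	uint8_t  da[6], bssid[6];
	uint32_t packet_id;
	uint8_t  data[];	/* payload after the 802.11 header */
};

int main(void)
{
	size_t frame_len = 64;	/* whole auth frame including header */
	size_t body = frame_len - MGMT_HDR_LEN;
	size_t alloc = offsetof(struct mf_params, data) + body;

	printf("fixed part %zu + body %zu = %zu bytes\n",
	       offsetof(struct mf_params, data), body, alloc);
	return 0;
}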
"failed" : "timedout", vif->mgmt_tx_status); + } + +tx_status: + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, + GFP_KERNEL); + kfree(mf_params); + return err; +} + +static int +brcmf_cyw_external_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_external_auth_params *params) +{ + struct brcmf_if *ifp; + struct brcmf_pub *drvr; + struct brcmf_auth_req_status_le auth_status; + int ret = 0; + + brcmf_dbg(TRACE, "Enter\n"); + + ifp = netdev_priv(dev); + drvr = ifp->drvr; + if (params->status == WLAN_STATUS_SUCCESS) { + auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_SUCCESS); + } else { + bphy_err(drvr, "External authentication failed: status=%d\n", + params->status); + auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_FAIL); + } + + memcpy(auth_status.peer_mac, params->bssid, ETH_ALEN); + params->ssid.ssid_len = min_t(u8, params->ssid.ssid_len, + IEEE80211_MAX_SSID_LEN); + auth_status.ssid_len = cpu_to_le32(params->ssid.ssid_len); + memcpy(auth_status.ssid, params->ssid.ssid, params->ssid.ssid_len); + + ret = brcmf_fil_iovar_data_set(ifp, "auth_status", &auth_status, + sizeof(auth_status)); + if (ret < 0) + bphy_err(drvr, "auth_status iovar failed: ret=%d\n", ret); + + return ret; +} + +static void brcmf_cyw_get_cfg80211_ops(struct brcmf_pub *drvr) +{ + drvr->ops->mgmt_tx = brcmf_cyw_mgmt_tx; + drvr->ops->external_auth = brcmf_cyw_external_auth; +} + +static s32 +brcmf_cyw_notify_ext_auth_req(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, void *data) +{ + struct brcmf_pub *drvr = ifp->drvr; + struct cfg80211_external_auth_params params; + struct brcmf_auth_req_status_le *auth_req = + (struct brcmf_auth_req_status_le *)data; + s32 err = 0; + + brcmf_dbg(INFO, "Enter: event %s (%d) received\n", + brcmf_fweh_event_name(e->event_code), e->event_code); + + if (e->datalen < sizeof(*auth_req)) { + bphy_err(drvr, "Event %s (%d) data too small. Ignore\n", + brcmf_fweh_event_name(e->event_code), e->event_code); + return -EINVAL; + } + + memset(¶ms, 0, sizeof(params)); + params.action = NL80211_EXTERNAL_AUTH_START; + params.key_mgmt_suite = WLAN_AKM_SUITE_SAE; + params.status = WLAN_STATUS_SUCCESS; + params.ssid.ssid_len = min_t(u32, 32, le32_to_cpu(auth_req->ssid_len)); + memcpy(params.ssid.ssid, auth_req->ssid, params.ssid.ssid_len); + memcpy(params.bssid, auth_req->peer_mac, ETH_ALEN); + + err = cfg80211_external_auth_request(ifp->ndev, ¶ms, GFP_KERNEL); + if (err) + bphy_err(drvr, "Ext Auth request to supplicant failed (%d)\n", + err); + + return err; +} + +static s32 +brcmf_notify_auth_frame_rx(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, void *data) +{ + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_cfg80211_info *cfg = drvr->config; + struct wireless_dev *wdev; + u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data); + struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data; + u8 *frame = (u8 *)(rxframe + 1); + struct brcmu_chan ch; + struct ieee80211_mgmt *mgmt_frame; + s32 freq; + + brcmf_dbg(INFO, "Enter: event %s (%d) received\n", + brcmf_fweh_event_name(e->event_code), e->event_code); + + if (e->datalen < sizeof(*rxframe)) { + bphy_err(drvr, "Event %s (%d) data too small. 
Ignore\n", + brcmf_fweh_event_name(e->event_code), e->event_code); + return -EINVAL; + } + + wdev = &ifp->vif->wdev; + WARN_ON(!wdev); + + ch.chspec = be16_to_cpu(rxframe->chanspec); + cfg->d11inf.decchspec(&ch); + + mgmt_frame = kzalloc(mgmt_frame_len, GFP_KERNEL); + if (!mgmt_frame) + return -ENOMEM; + + mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH); + memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN); + memcpy(mgmt_frame->sa, e->addr, ETH_ALEN); + brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid, + ETH_ALEN); + frame += offsetof(struct ieee80211_mgmt, u); + memcpy(&mgmt_frame->u, frame, + mgmt_frame_len - offsetof(struct ieee80211_mgmt, u)); + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, + ch.band == BRCMU_CHAN_BAND_2G ? + NL80211_BAND_2GHZ : + NL80211_BAND_5GHZ); + + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, + NL80211_RXMGMT_FLAG_EXTERNAL_AUTH); + kfree(mgmt_frame); + return 0; +} + +static s32 +brcmf_notify_mgmt_tx_status(struct brcmf_if *ifp, + const struct brcmf_event_msg *e, void *data) +{ + struct brcmf_cfg80211_vif *vif = ifp->vif; + u32 *packet_id = (u32 *)data; + + brcmf_dbg(INFO, "Enter: event %s (%d), status=%d\n", + brcmf_fweh_event_name(e->event_code), e->event_code, + e->status); + + if (!test_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status) || + (*packet_id != vif->mgmt_tx_id)) + return 0; + + if (e->event_code == BRCMF_E_MGMT_FRAME_TXSTATUS) { + if (e->status == BRCMF_E_STATUS_SUCCESS) + set_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status); + else + set_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status); + } else { + set_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED, &vif->mgmt_tx_status); + } + + complete(&vif->mgmt_tx); + return 0; +} + +static void brcmf_cyw_register_event_handlers(struct brcmf_pub *drvr) +{ + brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_REQ, + brcmf_cyw_notify_ext_auth_req); + brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_FRAME_RX, + brcmf_notify_auth_frame_rx); + brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_TXSTATUS, + brcmf_notify_mgmt_tx_status); + brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_OFFCHAN_DONE, + brcmf_notify_mgmt_tx_status); +} + const struct brcmf_fwvid_ops brcmf_cyw_ops = { .set_sae_password = brcmf_cyw_set_sae_pwd, .alloc_fweh_info = brcmf_cyw_alloc_fweh_info, + .activate_events = brcmf_cyw_activate_events, + .get_cfg80211_ops = brcmf_cyw_get_cfg80211_ops, + .register_event_handlers = brcmf_cyw_register_event_handlers, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h new file mode 100644 index 000000000000..08c69142495a --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2012 Broadcom Corporation + */ + +#ifndef CYW_FWIL_TYPES_H_ +#define CYW_FWIL_TYPES_H_ + +#include <fwil_types.h> + +enum brcmf_event_msgs_ext_command { + CYW_EVENTMSGS_NONE = 0, + CYW_EVENTMSGS_SET_BIT = 1, + CYW_EVENTMSGS_RESET_BIT = 2, + CYW_EVENTMSGS_SET_MASK = 3, +}; + +#define EVENTMSGS_VER 1 +#define EVENTMSGS_EXT_STRUCT_SIZE offsetof(struct eventmsgs_ext, mask[0]) + +/** + * struct brcmf_eventmsgs_ext - structure used with "eventmsgs_ext" iovar. + * + * @ver: version. + * @command: requested operation (see &enum event_msgs_ext_command). + * @len: length of the @mask array. + * @maxgetsize: indicates maximum mask size that may be returned by firmware + * upon iovar GET. 
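brcmf_notify_mgmt_tx_status() above only consumes a TX-status event when the SEND_FRAME flag is set and the event's packet_id matches the cookie of the frame that was queued; otherwise the event belongs to someone else and is ignored. Below is a condensed single-threaded model of that filtering (in the driver the bits live in vif->mgmt_tx_status and a struct completion is signalled).

#include <stdbool.h>
#include <stdio.h>

enum tx_bit { TX_ACK, TX_NOACK, TX_OFFCHAN_DONE, TX_SEND_FRAME };

struct tx_state {
	unsigned long bits;
	unsigned int  pending_id;
};

/* returns true if the event was consumed for the pending frame */
static bool handle_txstatus(struct tx_state *st, unsigned int packet_id,
			    bool is_txstatus_event, bool fw_success)
{
	if (!(st->bits & (1UL << TX_SEND_FRAME)) || packet_id != st->pending_id)
		return false;

	if (is_txstatus_event)
		st->bits |= 1UL << (fw_success ? TX_ACK : TX_NOACK);
	else
		st->bits |= 1UL << TX_OFFCHAN_DONE;
	return true;	/* here the driver would complete(&vif->mgmt_tx) */
}

int main(void)
{
	struct tx_state st = { .bits = 1UL << TX_SEND_FRAME, .pending_id = 7 };

	printf("wrong id consumed: %d\n", handle_txstatus(&st, 3, true, true));
	printf("right id consumed: %d\n", handle_txstatus(&st, 7, true, true));
	printf("ACK bit set: %d\n", !!(st.bits & (1UL << TX_ACK)));
	return 0;
}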
+ * @mask: array where each bit represents firmware event. + */ +struct brcmf_eventmsgs_ext { + u8 ver; + u8 command; + u8 len; + u8 maxgetsize; + u8 mask[] __counted_by(len); +}; + +#define BRCMF_EXTAUTH_START 1 +#define BRCMF_EXTAUTH_ABORT 2 +#define BRCMF_EXTAUTH_FAIL 3 +#define BRCMF_EXTAUTH_SUCCESS 4 + +/** + * struct brcmf_auth_req_status_le - external auth request and status update + * + * @flags: flags for external auth status + * @peer_mac: peer MAC address + * @ssid_len: length of ssid + * @ssid: ssid characters + * @pmkid: PMKSA identifier + */ +struct brcmf_auth_req_status_le { + __le16 flags; + u8 peer_mac[ETH_ALEN]; + __le32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 pmkid[WLAN_PMKID_LEN]; +}; + +/** + * struct brcmf_mf_params_le - management frame parameters for mgmt_frame iovar + * + * @version: version of the iovar + * @dwell_time: dwell duration in ms + * @len: length of frame data + * @frame_control: frame control + * @channel: channel + * @da: peer MAC address + * @bssid: BSS network identifier + * @packet_id: packet identifier + * @data: frame data + */ +struct brcmf_mf_params_le { + __le32 version; + __le32 dwell_time; + __le16 len; + __le16 frame_control; + __le16 channel; + u8 da[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le32 packet_id; + u8 data[] __counted_by(len); +}; + +#endif /* CYW_FWIL_TYPES_H_ */ + diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 0d9ae197fa1e..488364ef8ff2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -42,8 +42,9 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { { BRCMF_FEAT_MONITOR_FLAG, "rtap" }, { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, { BRCMF_FEAT_DOT11H, "802.11h" }, - { BRCMF_FEAT_SAE, "sae" }, + { BRCMF_FEAT_SAE, "sae " }, { BRCMF_FEAT_FWAUTH, "idauth" }, + { BRCMF_FEAT_SAE_EXT, "sae_ext" }, }; #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 7f4f0b3e4a7b..31f8695ca417 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -31,6 +31,7 @@ * FWAUTH: Firmware authenticator * DUMP_OBSS: Firmware has capable to dump obss info to support ACS * SCAN_V2: Version 2 scan params + * SAE_EXT: SAE authentication handled by user-space supplicant */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -57,7 +58,8 @@ BRCMF_FEAT_DEF(DUMP_OBSS) \ BRCMF_FEAT_DEF(SCAN_V2) \ BRCMF_FEAT_DEF(PMKID_V2) \ - BRCMF_FEAT_DEF(PMKID_V3) + BRCMF_FEAT_DEF(PMKID_V3) \ + BRCMF_FEAT_DEF(SAE_EXT) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index f0b6a7607f16..c2d98ee6652f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -75,6 +75,7 @@ const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code) return "nodebug"; } #endif +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_event_name); /** * brcmf_fweh_queue_event() - create and queue event. @@ -405,6 +406,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code, brcmf_fweh_event_name(code)); return 0; } +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_register); /** * brcmf_fweh_unregister() - remove handler for given code. 
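One subtle change above is the SAE capability string becoming "sae " with a trailing space. Presumably the capability list reported by firmware is a space-separated string matched by substring, so a bare "sae" would now also match the new "sae_ext" entry; the trailing space disambiguates the two. A quick demonstration of the effect under that assumption:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* space-separated capability string as firmware might report it */
	const char *caps = "mbss rtap 802.11h sae_ext idauth ";

	printf("\"sae\"  found: %d\n", strstr(caps, "sae") != NULL);	/* 1: false hit */
	printf("\"sae \" found: %d\n", strstr(caps, "sae ") != NULL);	/* 0 */
	return 0;
}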
@@ -448,11 +450,14 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp) brcmf_dbg(EVENT, "enable event IF\n"); setbit(fweh->event_mask, BRCMF_E_IF); + /* allow per-vendor method to activate firmware events */ + if (!brcmf_fwvid_activate_events(ifp)) + return 0; + err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask, fweh->event_mask_len); if (err) bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err); - return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index eed439b84010..e327dd58d29c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -94,7 +94,11 @@ struct brcmf_cfg80211_info; BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \ BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \ - BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) + BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \ + BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_REQ, 0) \ + BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_FRAME_RX, 1) \ + BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_TXSTATUS, 2) \ + BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_OFFCHAN_DONE, 3) #define BRCMF_ENUM_DEF(id, val) \ BRCMF_E_##id = (val), @@ -337,7 +341,7 @@ struct brcmf_fweh_info { struct list_head event_q; uint event_mask_len; u8 *event_mask; - struct brcmf_fweh_event_map *event_map; + const struct brcmf_fweh_event_map *event_map; uint num_event_codes; brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes); }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h index e6ac9fc341bc..f3e011d090f2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h @@ -15,6 +15,9 @@ struct brcmf_fwvid_ops { void (*feat_attach)(struct brcmf_if *ifp); int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto); int (*alloc_fweh_info)(struct brcmf_pub *drvr); + int (*activate_events)(struct brcmf_if *ifp); + void (*get_cfg80211_ops)(struct brcmf_pub *drvr); + void (*register_event_handlers)(struct brcmf_pub *drvr); }; /* exported functions */ @@ -56,4 +59,30 @@ static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr) return drvr->vops->alloc_fweh_info(drvr); } +static inline int brcmf_fwvid_activate_events(struct brcmf_if *ifp) +{ + const struct brcmf_fwvid_ops *vops = ifp->drvr->vops; + + if (!vops || !vops->activate_events) + return -EOPNOTSUPP; + + return vops->activate_events(ifp); +} + +static inline void brcmf_fwvid_get_cfg80211_ops(struct brcmf_pub *drvr) +{ + if (!drvr->vops || !drvr->vops->get_cfg80211_ops) + return; + + drvr->vops->get_cfg80211_ops(drvr); +} + +static inline void brcmf_fwvid_register_event_handlers(struct brcmf_pub *drvr) +{ + if (!drvr->vops || !drvr->vops->register_event_handlers) + return; + + drvr->vops->register_event_handlers(drvr); +} + #endif /* FWVID_H_ */ diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 9546ceeaf5e3..3f476e333726 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -8,12 +8,23 @@ iwlwifi-objs += iwl-nvm-utils.o iwlwifi-objs += iwl-utils.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o -iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o +iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o 
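The brcmf_fweh_activate_events() change above tries the per-vendor hook first and only falls back to the generic "event_msgs" iovar when the hook is missing or fails; the fwvid wrapper returns -EOPNOTSUPP when the vendor provides no activate_events op. A bare-bones model of that optional-hook dispatch:

#include <errno.h>
#include <stdio.h>

struct vops { int (*activate_events)(void); };

static int vendor_activate(void) { return 0; }	/* vendor-specific path */

static int fwvid_activate_events(const struct vops *v)
{
	if (!v || !v->activate_events)
		return -EOPNOTSUPP;
	return v->activate_events();
}

static int activate_events(const struct vops *v)
{
	/* vendor method first; on success the generic iovar is skipped */
	if (!fwvid_activate_events(v))
		return 0;
	/* generic "event_msgs" path would run here */
	return 1;
}

int main(void)
{
	const struct vops cyw = { .activate_events = vendor_activate };

	printf("with vendor hook: %d, without: %d\n",
	       activate_events(&cyw), activate_events(NULL));
	return 0;
}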
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o -iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o -iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o + +CFLAGS_pcie/drv.o += -Wno-override-init + +# combined MAC/RF configurations +iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o +iwlwifi-$(CONFIG_IWLDVM) += cfg/5000.o cfg/6000.o +iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o +# MAC configurations +iwlwifi-$(CONFIG_IWLMVM) += cfg/9000.o cfg/22000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o +# RF configurations +iwlwifi-$(CONFIG_IWLMVM) += cfg/rf-jf.o cfg/rf-hr.o cfg/rf-gf.o +iwlwifi-$(CONFIG_IWLMLD) += cfg/rf-fm.o cfg/rf-wh.o cfg/rf-pe.o + iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index f172ffd2a841..c029e595e7c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2020, 2023 Intel Corporation + * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation *****************************************************************************/ #include <linux/module.h> @@ -29,7 +29,7 @@ #define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl1000_base_params = { +static const struct iwl_family_base_params iwl1000_base = { .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, @@ -42,12 +42,6 @@ static const struct iwl_base_params iwl1000_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_ht_params iwl1000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(NL80211_BAND_2GHZ), -}; - static const struct iwl_eeprom_params iwl1000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -60,54 +54,67 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { } }; +const struct iwl_mac_cfg iwl1000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_1000, + .base = &iwl1000_base, +}; + #define IWL_DEVICE_1000 \ .fw_name_pre = IWL1000_FW_PRE, \ .ucode_api_max = IWL1000_UCODE_API_MAX, \ .ucode_api_min = IWL1000_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_1000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .trans.base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_BLINK -const struct iwl_cfg iwl1000_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", +const struct iwl_rf_cfg iwl1000_bgn_cfg = { IWL_DEVICE_1000, - .ht_params = &iwl1000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, }; -const struct iwl_cfg iwl1000_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", +const char iwl1000_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BGN"; + +const struct iwl_rf_cfg iwl1000_bg_cfg = { IWL_DEVICE_1000, }; +const char iwl1000_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BG"; + #define 
IWL_DEVICE_100 \ .fw_name_pre = IWL100_FW_PRE, \ .ucode_api_max = IWL100_UCODE_API_MAX, \ .ucode_api_min = IWL100_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_100, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_1000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .trans.base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true -const struct iwl_cfg iwl100_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", +const struct iwl_rf_cfg iwl100_bgn_cfg = { IWL_DEVICE_100, - .ht_params = &iwl1000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, }; -const struct iwl_cfg iwl100_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 100 BG", +const char iwl100_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 100 BGN"; + +const struct iwl_rf_cfg iwl100_bg_cfg = { IWL_DEVICE_100, }; +const char iwl100_bg_name[] = "Intel(R) Centrino(R) Wireless-N 100 BG"; + MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index 6f3f26da0ad5..47554a5f406e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2020, 2023 Intel Corporation + * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation *****************************************************************************/ #include <linux/module.h> @@ -40,7 +40,7 @@ #define IWL135_FW_PRE "iwlwifi-135" #define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl2000_base_params = { +static const struct iwl_family_base_params iwl2000_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -54,7 +54,7 @@ static const struct iwl_base_params iwl2000_base_params = { }; -static const struct iwl_base_params iwl2030_base_params = { +static const struct iwl_family_base_params iwl2030_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -67,12 +67,6 @@ static const struct iwl_base_params iwl2030_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_ht_params iwl2000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(NL80211_BAND_2GHZ), -}; - static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -86,97 +80,119 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .enhanced_txpower = true, }; +const struct iwl_mac_cfg iwl2000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_2000, + .base = &iwl2000_base, +}; + #define IWL_DEVICE_2000 \ .fw_name_pre = IWL2000_FW_PRE, \ .ucode_api_max = IWL2000_UCODE_API_MAX, \ .ucode_api_min = IWL2000_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, 
\ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .trans.base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE -const struct iwl_cfg iwl2000_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", +const struct iwl_rf_cfg iwl2000_2bgn_cfg = { IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, }; -const struct iwl_cfg iwl2000_2bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN", - IWL_DEVICE_2000, - .ht_params = &iwl2000_ht_params, +const char iwl2000_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2200 BGN"; +const char iwl2000_2bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 2200D BGN"; + +const struct iwl_mac_cfg iwl2030_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_2030, + .base = &iwl2030_base, }; #define IWL_DEVICE_2030 \ .fw_name_pre = IWL2030_FW_PRE, \ .ucode_api_max = IWL2030_UCODE_API_MAX, \ .ucode_api_min = IWL2030_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_2030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .trans.base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE -const struct iwl_cfg iwl2030_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", +const struct iwl_rf_cfg iwl2030_2bgn_cfg = { IWL_DEVICE_2030, - .ht_params = &iwl2000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, +}; + +const char iwl2030_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2230 BGN"; + +const struct iwl_mac_cfg iwl105_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_105, + .base = &iwl2000_base, }; #define IWL_DEVICE_105 \ .fw_name_pre = IWL105_FW_PRE, \ .ucode_api_max = IWL105_UCODE_API_MAX, \ .ucode_api_min = IWL105_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_105, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .trans.base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true -const struct iwl_cfg iwl105_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", +const struct iwl_rf_cfg iwl105_bgn_cfg = { IWL_DEVICE_105, - .ht_params = &iwl2000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, }; -const struct iwl_cfg iwl105_bgn_d_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 105D BGN", - IWL_DEVICE_105, - .ht_params = &iwl2000_ht_params, +const char iwl105_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 105 BGN"; +const char iwl105_bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 105D BGN"; + +const struct iwl_mac_cfg iwl135_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_135, + .base = &iwl2030_base, }; #define IWL_DEVICE_135 \ .fw_name_pre = IWL135_FW_PRE, \ .ucode_api_max = IWL135_UCODE_API_MAX, \ .ucode_api_min = IWL135_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_135, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ 
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .trans.base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true -const struct iwl_cfg iwl135_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", +const struct iwl_rf_cfg iwl135_bgn_cfg = { IWL_DEVICE_135, - .ht_params = &iwl2000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ), + }, }; +const char iwl135_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 135 BGN"; + MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 67ee3b6e6d85..52e0beebf9ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -15,14 +15,7 @@ /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 77 -/* NVM versions */ -#define IWL_22000_NVM_VERSION 0x0a1d - /* Memory offsets and lengths */ -#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_22000_DCCM2_OFFSET 0x880000 -#define IWL_22000_DCCM2_LEN 0x8000 #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 @@ -49,8 +42,7 @@ #define IWL_CC_A_MODULE_FIRMWARE(api) \ IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl_22000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, +static const struct iwl_family_base_params iwl_22000_base = { .num_of_queues = 512, .max_tfd_queue_size = 256, .shadow_ram_support = true, @@ -59,155 +51,78 @@ static const struct iwl_base_params iwl_22000_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, -}; - -const struct iwl_ht_params iwl_22000_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | - BIT(NL80211_BAND_6GHZ), -}; - -#define IWL_DEVICE_22000_COMMON \ - .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_22000_DCCM_OFFSET, \ - .dccm_len = IWL_22000_DCCM_LEN, \ - .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ - .dccm2_len = IWL_22000_DCCM2_LEN, \ - .smem_offset = IWL_22000_SMEM_OFFSET, \ - .smem_len = IWL_22000_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x380, \ - .ht_params = &iwl_22000_ht_params, \ - .nvm_ver = IWL_22000_NVM_VERSION, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x400000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - 
}, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - } - -#define IWL_DEVICE_22500 \ - IWL_DEVICE_22000_COMMON, \ - .ucode_api_max = IWL_22000_UCODE_API_MAX, \ - .trans.device_family = IWL_DEVICE_FAMILY_22000, \ - .trans.base_params = &iwl_22000_base_params, \ - .gp2_reg_addr = 0xa02c68, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = MON_BUFF_WRPTR_VER2, \ - .mask = 0xffffffff, \ - }, \ - .cycle_cnt = { \ - .addr = MON_BUFF_CYCLE_CNT_VER2, \ - .mask = 0xffffffff, \ - }, \ - } - -const struct iwl_cfg_trans_params iwl_qu_trans_cfg = { + .smem_offset = IWL_22000_SMEM_OFFSET, + .smem_len = IWL_22000_SMEM_LEN, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x380, + .min_umac_error_event_table = 0x400000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 60 * 1024, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .gp2_reg_addr = 0xa02c68, + .mon_dram_regs = { + .write_ptr = { + .addr = MON_BUFF_WRPTR_VER2, + .mask = 0xffffffff, + }, + .cycle_cnt = { + .addr = MON_BUFF_CYCLE_CNT_VER2, + .mask = 0xffffffff, + }, + }, + .ucode_api_min = IWL_22000_UCODE_API_MIN, + .ucode_api_max = IWL_22000_UCODE_API_MAX, +}; + +const struct iwl_mac_cfg iwl_qu_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, + .base = &iwl_22000_base, .integrated = true, .xtal_latency = 500, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, }; -const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = { +const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, + .base = &iwl_22000_base, .integrated = true, .xtal_latency = 1820, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US, }; -const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = { +const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, + .base = &iwl_22000_base, .integrated = true, .xtal_latency = 12000, .low_latency_xtal = true, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -/* - * If the device doesn't support HE, no need to have that many buffers. - * 22000 devices can split multiple frames into a single RB, so fewer are - * needed; AX210 cannot (but use smaller RBs by default) - these sizes - * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with - * additional overhead to account for processing time. - */ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_22000_HE 2048 - -/* - * All JF radio modules are part of the 9000 series, but the MAC part - * looks more like 22000. That's why this device is here, but called - * 9560 nevertheless. 
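The iwlwifi restructuring above pulls the parameters that used to be duplicated into every per-device iwl_cfg out into shared structures: one family-wide base block that several MAC configs reference by pointer, with the RF-specific data split off into separate cfg and name objects. The sketch below shows only the sharing pattern itself, with invented field names rather than the real iwlwifi layout.

#include <stdio.h>

/* family-wide parameters, defined once and shared by reference */
struct family_base { unsigned int num_of_queues; int shadow_reg_enable; };

struct mac_cfg {
	const char *name;
	const struct family_base *base;	/* shared, not copied */
	unsigned int xtal_latency;
};

static const struct family_base fam_22000_base = {
	.num_of_queues = 512,
	.shadow_reg_enable = 1,
};

static const struct mac_cfg qu_cfg = {
	.name = "qu", .base = &fam_22000_base, .xtal_latency = 500,
};

static const struct mac_cfg qu_long_latency_cfg = {
	.name = "qu-long", .base = &fam_22000_base, .xtal_latency = 12000,
};

int main(void)
{
	const struct mac_cfg *cfgs[] = { &qu_cfg, &qu_long_latency_cfg };

	for (unsigned int i = 0; i < 2; i++)
		printf("%s: queues %u, xtal %u us\n", cfgs[i]->name,
		       cfgs[i]->base->num_of_queues, cfgs[i]->xtal_latency);
	return 0;
}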
- */ -const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = { - .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, - IWL_DEVICE_22500, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = { - .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, - IWL_DEVICE_22500, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = { - .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { +const struct iwl_mac_cfg iwl_ax200_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, + .base = &iwl_22000_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .bisr_workaround = 1, }; -const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; -const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; -const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; -const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; - const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; const char iwl_ax200_killer_1650x_name[] = @@ -217,210 +132,6 @@ const char iwl_ax201_killer_1650s_name[] = const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; -const struct iwl_cfg iwl_qu_b0_hr1_b0 = { - .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .tx_with_siso_diversity = true, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_qu_b0_hr_b0 = { - .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax201_cfg_qu_hr = { - .name = "Intel(R) Wi-Fi 6 AX201 160MHz", - .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_qu_c0_hr1_b0 = { - .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. 
- */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .tx_with_siso_diversity = true, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_qu_c0_hr_b0 = { - .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { - .name = "Intel(R) Wi-Fi 6 AX201 160MHz", - .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_quz_a0_hr1_b0 = { - .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .tx_with_siso_diversity = true, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax201_cfg_quz_hr = { - .name = "Intel(R) Wi-Fi 6 AX201 160MHz", - .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = { - .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", - .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = { - .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", - .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_ax200_cfg_cc = { - .fw_name_pre = IWL_CC_A_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. 
- */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", - .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", - .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", - .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", - .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { - .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index de7ede59a994..aaae3b1f5433 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation + * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation *****************************************************************************/ #include <linux/module.h> @@ -30,7 +30,7 @@ #define IWL5150_FW_PRE "iwlwifi-5150" #define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl5000_base_params = { +static const struct iwl_family_base_params iwl5000_base = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -41,11 +41,6 @@ static const struct iwl_base_params iwl5000_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_ht_params iwl5000_ht_params = { - .ht_greenfield_support = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), -}; - static const struct iwl_eeprom_params iwl5000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -58,93 +53,107 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { }, }; +const struct iwl_mac_cfg iwl5000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_5000, + .base = &iwl5000_base, +}; + #define IWL_DEVICE_5000 \ .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max = IWL5000_UCODE_API_MAX, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ - .trans.base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK -const struct iwl_cfg iwl5300_agn_cfg = { - .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", +const struct iwl_rf_cfg iwl5300_agn_cfg = { IWL_DEVICE_5000, /* at least EEPROM 0x11A has wrong info */ .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl5100_bgn_cfg = { - .name = "Intel(R) WiFi Link 5100 BGN", - IWL_DEVICE_5000, - .valid_tx_ant = ANT_B, /* .cfg overwrite */ - .valid_rx_ant = ANT_AB, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, -}; +const char iwl5300_agn_name[] = "Intel(R) Ultimate N WiFi Link 5300 AGN"; -const struct iwl_cfg iwl5100_abg_cfg = { - .name = "Intel(R) WiFi Link 5100 ABG", +const struct iwl_rf_cfg iwl5100_n_cfg = { IWL_DEVICE_5000, .valid_tx_ant = ANT_B, /* .cfg overwrite */ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ + .ht_params = { + .ht_greenfield_support = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl5100_agn_cfg = { - .name = "Intel(R) WiFi Link 5100 AGN", +const char iwl5100_bgn_name[] = "Intel(R) WiFi Link 5100 BGN"; + +const struct iwl_rf_cfg iwl5100_abg_cfg = { IWL_DEVICE_5000, .valid_tx_ant = ANT_B, /* .cfg overwrite */ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ - .ht_params = &iwl5000_ht_params, }; -const struct iwl_cfg iwl5350_agn_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", +const char iwl5100_abg_name[] = "Intel(R) WiFi Link 5100 ABG"; +const char iwl5100_agn_name[] = "Intel(R) WiFi Link 5100 AGN"; + +const struct iwl_rf_cfg iwl5350_agn_cfg = { .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, .ucode_api_min = IWL5000_UCODE_API_MIN, - .trans.device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = 
IWLAGN_RTC_INST_SIZE, .max_data_size = IWLAGN_RTC_DATA_SIZE, .nvm_ver = EEPROM_5050_EEPROM_VERSION, .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, - .trans.base_params = &iwl5000_base_params, .eeprom_params = &iwl5000_eeprom_params, - .ht_params = &iwl5000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .led_mode = IWL_LED_BLINK, .internal_wimax_coex = true, }; +const char iwl5350_agn_name[] = "Intel(R) WiMAX/WiFi Link 5350 AGN"; + +const struct iwl_mac_cfg iwl5150_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_5150, + .base = &iwl5000_base, +}; + #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5050_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ - .trans.base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true -const struct iwl_cfg iwl5150_agn_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", +const struct iwl_rf_cfg iwl5150_agn_cfg = { IWL_DEVICE_5150, - .ht_params = &iwl5000_ht_params, - + .ht_params = { + .ht_greenfield_support = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl5150_abg_cfg = { - .name = "Intel(R) WiMAX/WiFi Link 5150 ABG", +const char iwl5150_agn_name[] = "Intel(R) WiMAX/WiFi Link 5150 AGN"; + +const struct iwl_rf_cfg iwl5150_abg_cfg = { IWL_DEVICE_5150, }; +const char iwl5150_abg_name[] = "Intel(R) WiMAX/WiFi Link 5150 ABG"; + MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index f013cf420569..ab13394896e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation + * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation *****************************************************************************/ #include <linux/module.h> @@ -49,7 +49,7 @@ #define IWL6030_FW_PRE "iwlwifi-6000g2b" #define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl6000_base_params = { +static const struct iwl_family_base_params iwl6000_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -62,7 +62,7 @@ static const struct iwl_base_params iwl6000_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_base_params iwl6050_base_params = { +static const struct iwl_family_base_params iwl6050_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -75,7 +75,7 @@ static const struct iwl_base_params iwl6050_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_base_params iwl6000_g2_base_params = { +static const struct iwl_family_base_params iwl6000_g2_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, @@ -88,12 +88,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = { .scd_chain_ext_wa = true, }; -static const struct iwl_ht_params iwl6000_ht_params = { - .ht_greenfield_support = true, - .use_rts_for_aggregation = true, /* use rts/cts protection */ - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), -}; - static const struct iwl_eeprom_params iwl6000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -107,141 +101,126 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .enhanced_txpower = true, }; +const struct iwl_mac_cfg iwl6005_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6005, + .base = &iwl6000_g2_base, +}; + #define IWL_DEVICE_6005 \ .fw_name_pre = IWL6005_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6005, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6005_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ - .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE -const struct iwl_cfg iwl6005_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; - -const struct iwl_cfg iwl6005_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG", - IWL_DEVICE_6005, -}; - -const struct iwl_cfg iwl6005_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205 BG", +const struct iwl_rf_cfg iwl6005_n_cfg = { IWL_DEVICE_6005, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6005_2agn_sff_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; +const char iwl6005_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6205 AGN"; +const char iwl6005_2agn_sff_name[] = "Intel(R) Centrino(R) Advanced-N 6205S AGN"; +const char iwl6005_2agn_d_name[] = "Intel(R) Centrino(R) Advanced-N 6205D AGN"; +const char iwl6005_2agn_mow1_name[] = "Intel(R) Centrino(R) Advanced-N 6206 AGN"; +const char 
iwl6005_2agn_mow2_name[] = "Intel(R) Centrino(R) Advanced-N 6207 AGN"; -const struct iwl_cfg iwl6005_2agn_d_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN", +const struct iwl_rf_cfg iwl6005_non_n_cfg = { IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, }; -const struct iwl_cfg iwl6005_2agn_mow1_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, -}; +const char iwl6005_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 ABG"; +const char iwl6005_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 BG"; -const struct iwl_cfg iwl6005_2agn_mow2_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN", - IWL_DEVICE_6005, - .ht_params = &iwl6000_ht_params, +const struct iwl_mac_cfg iwl6030_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6030, + .base = &iwl6000_g2_base, }; #define IWL_DEVICE_6030 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE -const struct iwl_cfg iwl6030_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", +const struct iwl_rf_cfg iwl6030_n_cfg = { IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6030_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG", - IWL_DEVICE_6030, -}; +const char iwl6030_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 AGN"; +const char iwl6030_2bgn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BGN"; +const char iwl1030_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BGN"; +const char iwl1030_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BG"; -const struct iwl_cfg iwl6030_2bgn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN", +const struct iwl_rf_cfg iwl6030_non_n_cfg = { IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, }; -const struct iwl_cfg iwl6030_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6230 BG", - IWL_DEVICE_6030, -}; +const char iwl6030_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 ABG"; +const char iwl6030_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BG"; #define IWL_DEVICE_6035 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6035_UCODE_API_MAX, \ .ucode_api_min = IWL6035_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6030, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .trans.base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE -const struct iwl_cfg iwl6035_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", +const struct iwl_rf_cfg iwl6035_2agn_cfg = { IWL_DEVICE_6035, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6035_2agn_sff_cfg = { - .name 
= "Intel(R) Centrino(R) Ultimate-N 6235 AGN", - IWL_DEVICE_6035, - .ht_params = &iwl6000_ht_params, -}; +const char iwl6035_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6235 AGN"; +const char iwl6035_2agn_sff_name[] = "Intel(R) Centrino(R) Ultimate-N 6235 AGN"; -const struct iwl_cfg iwl1030_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", +const struct iwl_rf_cfg iwl130_bgn_cfg = { IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, + .rx_with_siso_diversity = true, }; -const struct iwl_cfg iwl1030_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 1030 BG", - IWL_DEVICE_6030, -}; +const char iwl130_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 130 BGN"; -const struct iwl_cfg iwl130_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 130 BGN", +const struct iwl_rf_cfg iwl130_bg_cfg = { IWL_DEVICE_6030, - .ht_params = &iwl6000_ht_params, .rx_with_siso_diversity = true, }; -const struct iwl_cfg iwl130_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N 130 BG", - IWL_DEVICE_6030, - .rx_with_siso_diversity = true, +const char iwl130_bg_name[] = "Intel(R) Centrino(R) Wireless-N 130 BG"; + +const struct iwl_mac_cfg iwl6000i_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6000i, + .base = &iwl6000_base, }; /* @@ -251,101 +230,127 @@ const struct iwl_cfg iwl130_bg_cfg = { .fw_name_pre = IWL6000_FW_PRE, \ .ucode_api_max = IWL6000_UCODE_API_MAX, \ .ucode_api_min = IWL6000_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6000i, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ .nvm_ver = EEPROM_6000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ - .trans.base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK -const struct iwl_cfg iwl6000i_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", +const struct iwl_rf_cfg iwl6000i_2agn_cfg = { IWL_DEVICE_6000i, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6000i_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG", +const char iwl6000i_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6200 AGN"; + +const struct iwl_rf_cfg iwl6000i_non_n_cfg = { IWL_DEVICE_6000i, }; -const struct iwl_cfg iwl6000i_2bg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N 6200 BG", - IWL_DEVICE_6000i, +const char iwl6000i_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 ABG"; +const char iwl6000i_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 BG"; + +const struct iwl_mac_cfg iwl6050_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6050, + .base = &iwl6050_base, }; #define IWL_DEVICE_6050 \ .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6050, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ .nvm_ver = EEPROM_6050_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ - .trans.base_params = 
&iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true -const struct iwl_cfg iwl6050_2agn_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", +const struct iwl_rf_cfg iwl6050_2agn_cfg = { IWL_DEVICE_6050, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6050_2abg_cfg = { - .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG", +const char iwl6050_2agn_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN"; + +const struct iwl_rf_cfg iwl6050_2abg_cfg = { IWL_DEVICE_6050, }; +const char iwl6050_2abg_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG"; + +const struct iwl_mac_cfg iwl6150_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6150, + .base = &iwl6050_base, +}; + #define IWL_DEVICE_6150 \ .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .trans.device_family = IWL_DEVICE_FAMILY_6150, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_6150_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ - .trans.base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true -const struct iwl_cfg iwl6150_bgn_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", +const struct iwl_rf_cfg iwl6150_bgn_cfg = { IWL_DEVICE_6150, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, }; -const struct iwl_cfg iwl6150_bg_cfg = { - .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG", +const char iwl6150_bgn_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN"; + +const struct iwl_rf_cfg iwl6150_bg_cfg = { IWL_DEVICE_6150, }; -const struct iwl_cfg iwl6000_3agn_cfg = { - .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", +const char iwl6150_bg_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG"; + +const struct iwl_mac_cfg iwl6000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_6000, + .base = &iwl6000_base, +}; + +const struct iwl_rf_cfg iwl6000_3agn_cfg = { .fw_name_pre = IWL6000_FW_PRE, .ucode_api_max = IWL6000_UCODE_API_MAX, .ucode_api_min = IWL6000_UCODE_API_MIN, - .trans.device_family = IWL_DEVICE_FAMILY_6000, .max_inst_size = IWL60_RTC_INST_SIZE, .max_data_size = IWL60_RTC_DATA_SIZE, .nvm_ver = EEPROM_6000_EEPROM_VERSION, .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, - .trans.base_params = &iwl6000_base_params, .eeprom_params = &iwl6000_eeprom_params, - .ht_params = &iwl6000_ht_params, + .ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .led_mode = IWL_LED_BLINK, }; +const char iwl6000_3agn_name[] = "Intel(R) Centrino(R) Ultimate-N 6300 AGN"; + MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 4e2afdedf4c6..f987ad3192c1 
100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2023, 2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -49,7 +49,7 @@ #define IWL7265D_FW_PRE "iwlwifi-7265D" #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl7000_base_params = { +static const struct iwl_family_base_params iwl7000_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_16K, .num_of_queues = 31, .max_tfd_queue_size = 256, @@ -60,6 +60,7 @@ static const struct iwl_base_params iwl7000_base_params = { .shadow_reg_enable = true, .pcie_l1_allowed = true, .apmg_wake_up_wa = true, + .nvm_hw_section_num = 0, }; static const struct iwl_tt_params iwl7000_high_temp_tt_params = { @@ -84,16 +85,13 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = { .support_tx_backoff = true, }; -static const struct iwl_ht_params iwl7000_ht_params = { - .stbc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), +const struct iwl_mac_cfg iwl7000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_7000, + .base = &iwl7000_base, }; #define IWL_DEVICE_7000_COMMON \ - .trans.device_family = IWL_DEVICE_FAMILY_7000, \ - .trans.base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 0, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL7000_DCCM_OFFSET @@ -117,77 +115,52 @@ static const struct iwl_ht_params iwl7000_ht_params = { .ucode_api_max = IWL7265D_UCODE_API_MAX, \ .ucode_api_min = IWL7265D_UCODE_API_MIN -const struct iwl_cfg iwl7260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7260", +const char iwl7260_2ac_name[] = "Intel(R) Dual Band Wireless AC 7260"; +const char iwl7260_2n_name[] = "Intel(R) Dual Band Wireless N 7260"; +const char iwl7260_n_name[] = "Intel(R) Wireless N 7260"; +const char iwl3160_2ac_name[] = "Intel(R) Dual Band Wireless AC 3160"; +const char iwl3160_2n_name[] = "Intel(R) Dual Band Wireless N 3160"; +const char iwl3160_n_name[] = "Intel(R) Wireless N 3160"; +const char iwl3165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3165"; +const char iwl3168_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3168"; +const char iwl7265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 7265"; +const char iwl7265_2n_name[] = "Intel(R) Dual Band Wireless-N 7265"; +const char iwl7265_n_name[] = "Intel(R) Wireless-N 7265"; + +const struct iwl_rf_cfg iwl7260_cfg = { .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = { + .stbc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; -const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { - .name = "Intel(R) Dual Band Wireless AC 7260", +const struct iwl_rf_cfg iwl7260_high_temp_cfg = { .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = { + .stbc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL7260_NVM_VERSION, - .high_temp = true, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, .thermal_params = &iwl7000_high_temp_tt_params, }; -const 
struct iwl_cfg iwl7260_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, -}; - -const struct iwl_cfg iwl7260_n_cfg = { - .name = "Intel(R) Wireless N 7260", - .fw_name_pre = IWL7260_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL7260_NVM_VERSION, - .host_interrupt_operation_mode = true, - .lp_xtal_workaround = true, - .dccm_len = IWL7260_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 3160", +const struct iwl_rf_cfg iwl3160_cfg = { .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3160_NVM_VERSION, - .host_interrupt_operation_mode = true, - .dccm_len = IWL3160_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 3160", - .fw_name_pre = IWL3160_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3160_NVM_VERSION, - .host_interrupt_operation_mode = true, - .dccm_len = IWL3160_DCCM_LEN, -}; - -const struct iwl_cfg iwl3160_n_cfg = { - .name = "Intel(R) Wireless N 3160", - .fw_name_pre = IWL3160_FW_PRE, - IWL_DEVICE_7000, - .ht_params = &iwl7000_ht_params, + .ht_params = { + .stbc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, @@ -204,88 +177,52 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {0}, }; -static const struct iwl_ht_params iwl7265_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), -}; - -const struct iwl_cfg iwl3165_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 3165", +const struct iwl_rf_cfg iwl3165_2ac_cfg = { .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, - .ht_params = &iwl7000_ht_params, + .ht_params = { + .stbc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL3165_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; -const struct iwl_cfg iwl3168_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 3168", +const struct iwl_rf_cfg iwl3168_2ac_cfg = { .fw_name_pre = IWL3168_FW_PRE, IWL_DEVICE_3008, - .ht_params = &iwl7000_ht_params, + .ht_params = { + .stbc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL3168_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, .nvm_type = IWL_NVM_SDP, }; -const struct iwl_cfg iwl7265_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7265", - .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7005, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265_NVM_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7265", - .fw_name_pre = IWL7265_FW_PRE, - IWL_DEVICE_7005, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265_NVM_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265_n_cfg = { - .name = "Intel(R) Wireless N 7265", +const struct iwl_rf_cfg iwl7265_cfg = { .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, - .ht_params = &iwl7265_ht_params, + 
.ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; -const struct iwl_cfg iwl7265d_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 7265", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7005D, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265D_NVM_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265d_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 7265", +const struct iwl_rf_cfg iwl7265d_cfg = { .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, - .ht_params = &iwl7265_ht_params, - .nvm_ver = IWL7265D_NVM_VERSION, - .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, - .dccm_len = IWL7265_DCCM_LEN, -}; - -const struct iwl_cfg iwl7265d_n_cfg = { - .name = "Intel(R) Wireless N 7265", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7005D, - .ht_params = &iwl7265_ht_params, + .ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index d09cf8d7dc01..b56574006ee0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2014, 2018-2020, 2023 Intel Corporation + * Copyright (C) 2014, 2018-2020, 2023, 2025 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -35,9 +35,7 @@ #define IWL8265_MODULE_FIRMWARE(api) \ IWL8265_FW_PRE "-" __stringify(api) ".ucode" -#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" - -static const struct iwl_base_params iwl8000_base_params = { +static const struct iwl_family_base_params iwl8000_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, @@ -47,12 +45,12 @@ static const struct iwl_base_params iwl8000_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl8000_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + .nvm_hw_section_num = 10, + .features = NETIF_F_RXCSUM, + .smem_offset = IWL8260_SMEM_OFFSET, + .smem_len = IWL8260_SMEM_LEN, + .apmg_not_supported = true, + .min_umac_error_event_table = 0x800000, }; static const struct iwl_tt_params iwl8000_tt_params = { @@ -76,30 +74,20 @@ static const struct iwl_tt_params iwl8000_tt_params = { .support_tx_backoff = true, }; +const struct iwl_mac_cfg iwl8000_mac_cfg = { + .device_family = IWL_DEVICE_FAMILY_8000, + .base = &iwl8000_base, +}; + #define IWL_DEVICE_8000_COMMON \ - .trans.device_family = IWL_DEVICE_FAMILY_8000, \ - .trans.base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ .dccm_len = IWL8260_DCCM_LEN, \ .dccm2_offset = IWL8260_DCCM2_OFFSET, \ .dccm2_len = IWL8260_DCCM2_LEN, \ - .smem_offset = IWL8260_SMEM_OFFSET, \ - .smem_len = IWL8260_SMEM_LEN, \ - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ .thermal_params = &iwl8000_tt_params, \ - 
.apmg_not_supported = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000 - -#define IWL_DEVICE_8000 \ - IWL_DEVICE_8000_COMMON, \ - .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_min = IWL8000_UCODE_API_MIN \ + .nvm_type = IWL_NVM_EXT #define IWL_DEVICE_8260 \ IWL_DEVICE_8000_COMMON, \ @@ -111,47 +99,39 @@ static const struct iwl_tt_params iwl8000_tt_params = { .ucode_api_max = IWL8265_UCODE_API_MAX, \ .ucode_api_min = IWL8265_UCODE_API_MIN \ -const struct iwl_cfg iwl8260_2n_cfg = { - .name = "Intel(R) Dual Band Wireless N 8260", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8260, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, -}; +const char iwl8260_2n_name[] = "Intel(R) Dual Band Wireless-N 8260"; +const char iwl8260_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8260"; +const char iwl8265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8265"; +const char iwl8275_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8275"; +const char iwl4165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 4165"; -const struct iwl_cfg iwl8260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 8260", +const char iwl_killer_1435i_name[] = + "Killer(R) Wireless-AC 1435i Wireless Network Adapter (8265D2W)"; +const char iwl_killer_1434_kix_name[] = + "Killer(R) Wireless-AC 1435-KIX Wireless Network Adapter (8265NGW)"; + +const struct iwl_rf_cfg iwl8260_cfg = { .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8260, - .ht_params = &iwl8000_ht_params, + .ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL8000_NVM_VERSION, }; -const struct iwl_cfg iwl8265_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 8265", +const struct iwl_rf_cfg iwl8265_cfg = { .fw_name_pre = IWL8265_FW_PRE, IWL_DEVICE_8265, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, - .vht_mu_mimo_supported = true, -}; - -const struct iwl_cfg iwl8275_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 8275", - .fw_name_pre = IWL8265_FW_PRE, - IWL_DEVICE_8265, - .ht_params = &iwl8000_ht_params, + .ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + }, .nvm_ver = IWL8000_NVM_VERSION, .vht_mu_mimo_supported = true, }; -const struct iwl_cfg iwl4165_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC 4165", - .fw_name_pre = IWL8000_FW_PRE, - IWL_DEVICE_8000, - .ht_params = &iwl8000_ht_params, - .nvm_ver = IWL8000_NVM_VERSION, -}; - MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 0130d9a9b78b..ac1fa291cf2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021, 2023 Intel Corporation + * Copyright (C) 2018-2021, 2023, 2025 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -15,14 +15,7 @@ /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 -/* NVM versions */ -#define IWL9000_NVM_VERSION 0x0a1d - /* Memory offsets and lengths */ -#define IWL9000_DCCM_OFFSET 0x800000 -#define IWL9000_DCCM_LEN 0x18000 -#define IWL9000_DCCM2_OFFSET 0x880000 -#define IWL9000_DCCM2_LEN 0x8000 #define 
IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 @@ -33,7 +26,7 @@ #define IWL9260_MODULE_FIRMWARE(api) \ IWL9260_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl9000_base_params = { +static const struct iwl_family_base_params iwl9000_base = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, @@ -43,150 +36,69 @@ static const struct iwl_base_params iwl9000_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl9000_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), -}; - -static const struct iwl_tt_params iwl9000_tt_params = { - .ct_kill_entry = 115, - .ct_kill_exit = 93, - .ct_kill_duration = 5, - .dynamic_smps_entry = 111, - .dynamic_smps_exit = 107, - .tx_protection_entry = 112, - .tx_protection_exit = 105, - .tx_backoff = { - {.temperature = 110, .backoff = 200}, - {.temperature = 111, .backoff = 600}, - {.temperature = 112, .backoff = 1200}, - {.temperature = 113, .backoff = 2000}, - {.temperature = 114, .backoff = 4000}, + .smem_offset = IWL9000_SMEM_OFFSET, + .smem_len = IWL9000_SMEM_LEN, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x380, + .min_umac_error_event_table = 0x800000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 92 * 1024, + .nvm_hw_section_num = 10, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .mon_dram_regs = { + .write_ptr = { + .addr = MON_BUFF_WRPTR_VER2, + .mask = 0xffffffff, + }, + .cycle_cnt = { + .addr = MON_BUFF_CYCLE_CNT_VER2, + .mask = 0xffffffff, + }, }, - .support_ct_kill = true, - .support_dynamic_smps = true, - .support_tx_protection = true, - .support_tx_backoff = true, + .ucode_api_max = IWL9000_UCODE_API_MAX, + .ucode_api_min = IWL9000_UCODE_API_MIN, }; -#define IWL_DEVICE_9000 \ - .ucode_api_max = IWL9000_UCODE_API_MAX, \ - .ucode_api_min = IWL9000_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL9000_DCCM_OFFSET, \ - .dccm_len = IWL9000_DCCM_LEN, \ - .dccm2_offset = IWL9000_DCCM2_OFFSET, \ - .dccm2_len = IWL9000_DCCM2_LEN, \ - .smem_offset = IWL9000_SMEM_OFFSET, \ - .smem_len = IWL9000_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .thermal_params = &iwl9000_tt_params, \ - .apmg_not_supported = true, \ - .num_rbds = 512, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x380, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 92 * 1024, \ - .ht_params = &iwl9000_ht_params, \ - .nvm_ver = IWL9000_NVM_VERSION, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - }, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = MON_BUFF_WRPTR_VER2, \ - .mask = 0xffffffff, \ - }, \ - .cycle_cnt = { \ - .addr = MON_BUFF_CYCLE_CNT_VER2, \ - .mask = 0xffffffff, \ - }, \ - } - -const struct iwl_cfg_trans_params iwl9000_trans_cfg = { +const struct iwl_mac_cfg iwl9000_mac_cfg = { .device_family = 
IWL_DEVICE_FAMILY_9000, - .base_params = &iwl9000_base_params, + .base = &iwl9000_base, .mq_rx_supported = true, - .rf_id = true, }; -const struct iwl_cfg_trans_params iwl9560_trans_cfg = { +const struct iwl_mac_cfg iwl9560_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, - .base_params = &iwl9000_base_params, + .base = &iwl9000_base, .mq_rx_supported = true, - .rf_id = true, .integrated = true, .xtal_latency = 650, }; -const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = { +const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, - .base_params = &iwl9000_base_params, + .base = &iwl9000_base, .mq_rx_supported = true, - .rf_id = true, .integrated = true, .xtal_latency = 2820, }; -const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = { +const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, - .base_params = &iwl9000_base_params, + .base = &iwl9000_base, .mq_rx_supported = true, - .rf_id = true, .integrated = true, .xtal_latency = 670, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; -const char iwl9162_name[] = "Intel(R) Wireless-AC 9162"; -const char iwl9260_name[] = "Intel(R) Wireless-AC 9260"; -const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1"; -const char iwl9270_name[] = "Intel(R) Wireless-AC 9270"; -const char iwl9461_name[] = "Intel(R) Wireless-AC 9461"; -const char iwl9462_name[] = "Intel(R) Wireless-AC 9462"; -const char iwl9560_name[] = "Intel(R) Wireless-AC 9560"; -const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz"; -const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz"; -const char iwl9270_160_name[] = "Intel(R) Wireless-AC 9270 160MHz"; -const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz"; -const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz"; -const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz"; - -const char iwl9260_killer_1550_name[] = - "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz"; -const char iwl9560_killer_1550i_name[] = - "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)"; -const char iwl9560_killer_1550i_160_name[] = - "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz"; -const char iwl9560_killer_1550s_name[] = - "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)"; -const char iwl9560_killer_1550s_160_name[] = - "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz"; - -const struct iwl_cfg iwl9260_2ac_cfg = { - .fw_name_pre = IWL9260_FW_PRE, - IWL_DEVICE_9000, -}; - -const struct iwl_cfg iwl9560_2ac_cfg_soc = { - .fw_name_pre = IWL9000_FW_PRE, - IWL_DEVICE_9000, -}; - MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index e87b57b9e2c0..3bf9fdbe01c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -15,14 +15,7 @@ /* Lowest firmware API version supported */ #define IWL_AX210_UCODE_API_MIN 77 -/* NVM versions */ -#define IWL_AX210_NVM_VERSION 0x0a1d - /* Memory offsets 
and lengths */ -#define IWL_AX210_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_AX210_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_AX210_DCCM2_OFFSET 0x880000 -#define IWL_AX210_DCCM2_LEN 0x8000 #define IWL_AX210_SMEM_OFFSET 0x400000 #define IWL_AX210_SMEM_LEN 0xD0000 @@ -47,8 +40,7 @@ #define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl_ax210_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, +static const struct iwl_family_base_params iwl_ax210_base = { .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, @@ -57,74 +49,60 @@ static const struct iwl_base_params iwl_ax210_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, + .smem_offset = IWL_AX210_SMEM_OFFSET, + .smem_len = IWL_AX210_SMEM_LEN, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x380, + .min_umac_error_event_table = 0x400000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 60 * 1024, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .min_txq_size = 128, + .gp2_reg_addr = 0xd02c68, + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, + .mon_dram_regs = { + .write_ptr = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, + }, + .cycle_cnt = { + .addr = DBGC_DBGBUF_WRAP_AROUND, + .mask = 0xffffffff, + }, + .cur_frag = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, + }, + }, + .ucode_api_min = IWL_AX210_UCODE_API_MIN, + .ucode_api_max = IWL_AX210_UCODE_API_MAX, }; -#define IWL_DEVICE_AX210_COMMON \ - .ucode_api_min = IWL_AX210_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_AX210_DCCM_OFFSET, \ - .dccm_len = IWL_AX210_DCCM_LEN, \ - .dccm2_offset = IWL_AX210_DCCM2_OFFSET, \ - .dccm2_len = IWL_AX210_DCCM2_LEN, \ - .smem_offset = IWL_AX210_SMEM_OFFSET, \ - .smem_len = IWL_AX210_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x380, \ - .ht_params = &iwl_22000_ht_params, \ - .nvm_ver = IWL_AX210_NVM_VERSION, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x400000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - } - -#define IWL_DEVICE_AX210 \ - IWL_DEVICE_AX210_COMMON, \ - .ucode_api_max = IWL_AX210_UCODE_API_MAX, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ - .trans.base_params = &iwl_ax210_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = 
DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - } +const struct iwl_mac_cfg iwl_ty_mac_cfg = { + .mq_rx_supported = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base = &iwl_ax210_base, + .umac_prph_offset = 0x300000, + /* TODO: the following values need to be checked */ + .xtal_latency = 500, +}; -const struct iwl_cfg_trans_params iwl_so_trans_cfg = { +const struct iwl_mac_cfg iwl_so_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, + .base = &iwl_ax210_base, .umac_prph_offset = 0x300000, .integrated = true, /* TODO: the following values need to be checked */ @@ -132,12 +110,11 @@ const struct iwl_cfg_trans_params iwl_so_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, }; -const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { +const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, + .base = &iwl_ax210_base, .umac_prph_offset = 0x300000, .integrated = true, .low_latency_xtal = true, @@ -145,12 +122,11 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { +const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg = { .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, + .base = &iwl_ax210_base, .umac_prph_offset = 0x300000, .integrated = true, .low_latency_xtal = true, @@ -159,107 +135,15 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { .imr_enabled = true, }; -/* - * If the device doesn't support HE, no need to have that many buffers. - * 22000 devices can split multiple frames into a single RB, so fewer are - * needed; AX210 cannot (but use smaller RBs by default) - these sizes - * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with - * additional overhead to account for processing time.
- */ -#define IWL_NUM_RBDS_NON_HE 512 -#define IWL_NUM_RBDS_AX210_HE 4096 - -const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { +const struct iwl_mac_cfg iwl_ma_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, + .base = &iwl_ax210_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000 }; -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; -const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; -const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; - -const char iwl_ax210_killer_1675w_name[] = - "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; -const char iwl_ax210_killer_1675x_name[] = - "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; -const char iwl_ax211_killer_1675s_name[] = - "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax211_killer_1675i_name[] = - "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax411_killer_1690s_name[] = - "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; -const char iwl_ax411_killer_1690i_name[] = - "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; - -const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { - .name = "Intel(R) Wireless-AC 9560 160MHz", - .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { - .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwl_cfg_ma = { - .fw_name_mac = "ma", - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { - .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index f055255a7c93..05e45fff8b36 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,19 +10,12 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 98 
+#define IWL_BZ_UCODE_API_MAX 99 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 93 -/* NVM versions */ -#define IWL_BZ_NVM_VERSION 0x0a1d - /* Memory offsets and lengths */ -#define IWL_BZ_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_BZ_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_BZ_DCCM2_OFFSET 0x880000 -#define IWL_BZ_DCCM2_LEN 0x8000 #define IWL_BZ_SMEM_OFFSET 0x400000 #define IWL_BZ_SMEM_LEN 0xD0000 @@ -38,13 +31,7 @@ #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -#if !IS_ENABLED(CONFIG_IWLMVM) -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; -const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; -#endif - -static const struct iwl_base_params iwl_bz_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, +static const struct iwl_family_base_params iwl_bz_base = { .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, @@ -53,91 +40,55 @@ static const struct iwl_base_params iwl_bz_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, + .smem_offset = IWL_BZ_SMEM_OFFSET, + .smem_len = IWL_BZ_SMEM_LEN, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x30, + .min_umac_error_event_table = 0xD0000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 60 * 1024, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .min_txq_size = 128, + .gp2_reg_addr = 0xd02c68, + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, + .mon_dram_regs = { + .write_ptr = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, + }, + .cycle_cnt = { + .addr = DBGC_DBGBUF_WRAP_AROUND, + .mask = 0xffffffff, + }, + .cur_frag = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, + }, + }, + .mon_dbgi_regs = { + .write_ptr = { + .addr = DBGI_SRAM_FIFO_POINTERS, + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, + }, + }, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .ucode_api_max = IWL_BZ_UCODE_API_MAX, + .ucode_api_min = IWL_BZ_UCODE_API_MIN, }; -const struct iwl_ht_params iwl_bz_ht_params = { - .stbc = true, - .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | - BIT(NL80211_BAND_6GHZ), -}; - -#define IWL_DEVICE_BZ_COMMON \ - .ucode_api_max = IWL_BZ_UCODE_API_MAX, \ - .ucode_api_min = IWL_BZ_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_BZ_DCCM_OFFSET, \ - .dccm_len = IWL_BZ_DCCM_LEN, \ - .dccm2_offset = IWL_BZ_DCCM2_OFFSET, \ - .dccm2_len = IWL_BZ_DCCM2_LEN, \ - .smem_offset = IWL_BZ_SMEM_OFFSET, \ - .smem_len = IWL_BZ_SMEM_LEN, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x30, \ - .nvm_ver = IWL_BZ_NVM_VERSION, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0xD0000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - }, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = 
IWL_DEVICE_FAMILY_BZ, \ - .trans.base_params = &iwl_bz_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - }, \ - .mon_dbgi_regs = { \ - .write_ptr = { \ - .addr = DBGI_SRAM_FIFO_POINTERS, \ - .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ - }, \ - } - -#define IWL_DEVICE_BZ \ - IWL_DEVICE_BZ_COMMON, \ - .ht_params = &iwl_bz_ht_params - -/* - * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an - * A-MPDU, with additional overhead to account for processing time. - */ -#define IWL_NUM_RBDS_BZ_EHT (512 * 16) - -const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { +const struct iwl_mac_cfg iwl_bz_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, - .base_params = &iwl_bz_base_params, + .base = &iwl_bz_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000, @@ -146,38 +97,16 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { +const struct iwl_mac_cfg iwl_gl_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_BZ, - .base_params = &iwl_bz_base_params, + .base = &iwl_bz_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .umac_prph_offset = 0x300000, .xtal_latency = 12000, .low_latency_xtal = true, }; -const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; -const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz"; -const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; -const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz"; - -const struct iwl_cfg iwl_cfg_bz = { - .fw_name_mac = "bz", - .uhb_supported = true, - IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_EHT, -}; - -const struct iwl_cfg iwl_cfg_gl = { - .fw_name_mac = "gl", - .uhb_supported = true, - IWL_DEVICE_BZ, - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, - .num_rbds = IWL_NUM_RBDS_BZ_EHT, -}; - MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX); IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c index 282b9b846c3a..45e55cef42ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c @@ -9,35 +9,21 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_DR_UCODE_API_MAX 98 +#define IWL_DR_UCODE_API_MAX 99 /* Lowest firmware API version supported */ -#define IWL_DR_UCODE_API_MIN 96 - -/* NVM versions */ -#define IWL_DR_NVM_VERSION 0x0a1d +#define IWL_DR_UCODE_API_MIN 97 /* Memory offsets and lengths */ -#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_DR_DCCM2_OFFSET 0x880000 -#define IWL_DR_DCCM2_LEN 0x8000 #define IWL_DR_SMEM_OFFSET 0x400000 #define IWL_DR_SMEM_LEN 0xD0000 #define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0" -#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0" -#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0" #define 
IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \ IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \ - IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \ - IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl_dr_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, +static const struct iwl_family_base_params iwl_dr_base = { .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, @@ -46,87 +32,55 @@ static const struct iwl_base_params iwl_dr_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, + .smem_offset = IWL_DR_SMEM_OFFSET, + .smem_len = IWL_DR_SMEM_LEN, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x30, + .min_umac_error_event_table = 0xD0000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 60 * 1024, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .min_txq_size = 128, + .gp2_reg_addr = 0xd02c68, + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, + .mon_dram_regs = { + .write_ptr = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, + }, + .cycle_cnt = { + .addr = DBGC_DBGBUF_WRAP_AROUND, + .mask = 0xffffffff, + }, + .cur_frag = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, + }, + }, + .mon_dbgi_regs = { + .write_ptr = { + .addr = DBGI_SRAM_FIFO_POINTERS, + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, + }, + }, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .ucode_api_max = IWL_DR_UCODE_API_MAX, + .ucode_api_min = IWL_DR_UCODE_API_MIN, }; -#define IWL_DEVICE_DR_COMMON \ - .ucode_api_max = IWL_DR_UCODE_API_MAX, \ - .ucode_api_min = IWL_DR_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_DR_DCCM_OFFSET, \ - .dccm_len = IWL_DR_DCCM_LEN, \ - .dccm2_offset = IWL_DR_DCCM2_OFFSET, \ - .dccm2_len = IWL_DR_DCCM2_LEN, \ - .smem_offset = IWL_DR_SMEM_OFFSET, \ - .smem_len = IWL_DR_SMEM_LEN, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x30, \ - .nvm_ver = IWL_DR_NVM_VERSION, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0xD0000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - }, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_DR, \ - .trans.base_params = &iwl_dr_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - }, \ - .mon_dbgi_regs = { \ - .write_ptr = { \ - .addr = DBGI_SRAM_FIFO_POINTERS, \ - .mask = 
DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ - }, \ - } - -#define IWL_DEVICE_DR \ - IWL_DEVICE_DR_COMMON, \ - .uhb_supported = true, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .num_rbds = IWL_NUM_RBDS_DR_EHT, \ - .ht_params = &iwl_bz_ht_params - -/* - * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an - * A-MPDU, with additional overhead to account for processing time. - */ -#define IWL_NUM_RBDS_DR_EHT (512 * 16) - -const struct iwl_cfg_trans_params iwl_dr_trans_cfg = { +const struct iwl_mac_cfg iwl_dr_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_DR, - .base_params = &iwl_dr_base_params, + .base = &iwl_dr_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000, @@ -135,31 +89,5 @@ const struct iwl_cfg_trans_params iwl_dr_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const char iwl_dr_name[] = "Intel(R) TBD Dr device"; - -const struct iwl_cfg iwl_cfg_dr = { - .fw_name_mac = "dr", - IWL_DEVICE_DR, -}; - -const struct iwl_cfg_trans_params iwl_br_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_DR, - .base_params = &iwl_dr_base_params, - .mq_rx_supported = true, - .rf_id = true, - .gen2 = true, - .umac_prph_offset = 0x300000, - .xtal_latency = 12000, - .low_latency_xtal = true, -}; - -const char iwl_br_name[] = "Intel(R) TBD Br device"; - -const struct iwl_cfg iwl_cfg_br = { - .fw_name_mac = "br", - IWL_DEVICE_DR, -}; - MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX)); + diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c new file mode 100644 index 000000000000..456a666c8dfd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2025 Intel Corporation + */ +#include "iwl-config.h" + +/* NVM versions */ +#define IWL_FM_NVM_VERSION 0x0a1d + +#define IWL_DEVICE_FM \ + .ht_params = { \ + .stbc = true, \ + .ldpc = true, \ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | \ + BIT(NL80211_BAND_5GHZ), \ + }, \ + .led_mode = IWL_LED_RF_STATE, \ + .non_shared_ant = ANT_B, \ + .vht_mu_mimo_supported = true, \ + .uhb_supported = true, \ + .num_rbds = IWL_NUM_RBDS_EHT, \ + .nvm_ver = IWL_FM_NVM_VERSION, \ + .nvm_type = IWL_NVM_EXT + +const struct iwl_rf_cfg iwl_rf_fm = { + IWL_DEVICE_FM, +}; + +const struct iwl_rf_cfg iwl_rf_fm_160mhz = { + IWL_DEVICE_FM, + .bw_limit = 160, +}; + +const char iwl_killer_be1750s_name[] = + "Killer(R) Wi-Fi 7 BE1750s 320MHz Wireless Network Adapter (BE201D2W)"; +const char iwl_killer_be1750i_name[] = + "Killer(R) Wi-Fi 7 BE1750i 320MHz Wireless Network Adapter (BE201NGW)"; +const char iwl_killer_be1750w_name[] = + "Killer(TM) Wi-Fi 7 BE1750w 320MHz Wireless Network Adapter (BE200D2W)"; +const char iwl_killer_be1750x_name[] = + "Killer(TM) Wi-Fi 7 BE1750x 320MHz Wireless Network Adapter (BE200NGW)"; +const char iwl_killer_be1790s_name[] = + "Killer(R) Wi-Fi 7 BE1790s 320MHz Wireless Network Adapter (BE401D2W)"; +const char iwl_killer_be1790i_name[] = + "Killer(R) Wi-Fi 7 BE1790i 320MHz Wireless Network Adapter (BE401NGW)"; + +const char iwl_be201_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; +const char iwl_be200_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; +const char iwl_be202_name[] = "Intel(R) Wi-Fi 7 
BE202 160MHz"; +const char iwl_be401_name[] = "Intel(R) Wi-Fi 7 BE401 320MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c new file mode 100644 index 000000000000..f55c286e83be --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2025 Intel Corporation + */ +#include "iwl-config.h" + +/* NVM versions */ +#define IWL_GF_NVM_VERSION 0x0a1d + +const struct iwl_rf_cfg iwl_rf_gf = { + .uhb_supported = true, + .led_mode = IWL_LED_RF_STATE, + .non_shared_ant = ANT_B, + .vht_mu_mimo_supported = true, + .ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | + BIT(NL80211_BAND_5GHZ), + }, + .nvm_ver = IWL_GF_NVM_VERSION, + .nvm_type = IWL_NVM_EXT, + .num_rbds = IWL_NUM_RBDS_HE, +}; + +const char iwl_ax210_killer_1675w_name[] = + "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; +const char iwl_ax210_killer_1675x_name[] = + "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; +const char iwl_ax211_killer_1675s_name[] = + "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211D2W)"; +const char iwl_ax211_killer_1675i_name[] = + "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax411_killer_1690s_name[] = + "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; +const char iwl_ax411_killer_1690i_name[] = + "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; + +const char iwl_ax210_name[] = "Intel(R) Wi-Fi 6E AX210 160MHz"; +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c new file mode 100644 index 000000000000..db02664e3917 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2025 Intel Corporation + */ +#include "iwl-config.h" + +/* NVM versions */ +#define IWL_HR_NVM_VERSION 0x0a1d + +#define IWL_DEVICE_HR \ + .led_mode = IWL_LED_RF_STATE, \ + .non_shared_ant = ANT_B, \ + .vht_mu_mimo_supported = true, \ + .ht_params = { \ + .stbc = true, \ + .ldpc = true, \ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | \ + BIT(NL80211_BAND_5GHZ), \ + }, \ + .num_rbds = IWL_NUM_RBDS_HE, \ + .nvm_ver = IWL_HR_NVM_VERSION, \ + .nvm_type = IWL_NVM_EXT + +const struct iwl_rf_cfg iwl_rf_hr1 = { + IWL_DEVICE_HR, + .tx_with_siso_diversity = true, +}; + +const struct iwl_rf_cfg iwl_rf_hr = { + IWL_DEVICE_HR, +}; + +const struct iwl_rf_cfg iwl_rf_hr_80mhz = { + IWL_DEVICE_HR, + .bw_limit = 80, +}; + +const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; +const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; +const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; +const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c new file mode 100644 index 000000000000..467eaeae6deb --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2021, 
2023, 2025 Intel Corporation + */ +#include "iwl-config.h" + +/* NVM versions */ +#define IWL_JF_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL9000_DCCM_OFFSET 0x800000 +#define IWL9000_DCCM_LEN 0x18000 +#define IWL9000_DCCM2_OFFSET 0x880000 +#define IWL9000_DCCM2_LEN 0x8000 + +static const struct iwl_tt_params iwl_jf_tt_params = { + .ct_kill_entry = 115, + .ct_kill_exit = 93, + .ct_kill_duration = 5, + .dynamic_smps_entry = 111, + .dynamic_smps_exit = 107, + .tx_protection_entry = 112, + .tx_protection_exit = 105, + .tx_backoff = { + {.temperature = 110, .backoff = 200}, + {.temperature = 111, .backoff = 600}, + {.temperature = 112, .backoff = 1200}, + {.temperature = 113, .backoff = 2000}, + {.temperature = 114, .backoff = 4000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +/* these values are ignored if not with Pu/Th MAC firmware, due to offload */ +#define IWL_DEVICE_JF_PU \ + .dccm_offset = IWL9000_DCCM_OFFSET, \ + .dccm_len = IWL9000_DCCM_LEN, \ + .dccm2_offset = IWL9000_DCCM2_OFFSET, \ + .dccm2_len = IWL9000_DCCM2_LEN, \ + .thermal_params = &iwl_jf_tt_params + +#define IWL_DEVICE_JF \ + IWL_DEVICE_JF_PU, \ + .led_mode = IWL_LED_RF_STATE, \ + .non_shared_ant = ANT_B, \ + .num_rbds = IWL_NUM_RBDS_NON_HE, \ + .vht_mu_mimo_supported = true, \ + .ht_params = { \ + .stbc = true, \ + .ldpc = true, \ + .ht40_bands = BIT(NL80211_BAND_2GHZ) | \ + BIT(NL80211_BAND_5GHZ), \ + }, \ + .nvm_ver = IWL_JF_NVM_VERSION, \ + .nvm_type = IWL_NVM_EXT + +const struct iwl_rf_cfg iwl_rf_jf = { + IWL_DEVICE_JF, +}; + +const struct iwl_rf_cfg iwl_rf_jf_80mhz = { + IWL_DEVICE_JF, + .bw_limit = 80, +}; + +const char iwl9260_name[] = "Intel(R) Wireless-AC 9260"; +const char iwl9461_name[] = "Intel(R) Wireless-AC 9461"; +const char iwl9462_name[] = "Intel(R) Wireless-AC 9462"; +const char iwl9560_name[] = "Intel(R) Wireless-AC 9560"; +const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz"; +const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz"; +const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz"; +const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz"; + +const char iwl9260_killer_1550_name[] = + "Killer(R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz"; +const char iwl9560_killer_1550i_name[] = + "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz"; +const char iwl9560_killer_1550s_name[] = + "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c new file mode 100644 index 000000000000..483f21659eff --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ +#include "iwl-config.h" + +/* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */ + +const char iwl_killer_bn1850w2_name[] = + "Killer(R) Wi-Fi 8 BN1850w2 320MHz Wireless Network Adapter (BN201.D2W)"; +const char iwl_killer_bn1850i_name[] = + "Killer(R) Wi-Fi 8 BN1850i 320MHz Wireless Network Adapter (BN201.NGW)"; + +const char iwl_bn201_name[] = "Intel(R) Wi-Fi 8 BN201"; +const char iwl_be221_name[] = "Intel(R) Wi-Fi 7 BE221"; +const char iwl_be223_name[] = "Intel(R) Wi-Fi 7 BE223"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c new 
file mode 100644 index 000000000000..97735175cb0e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ +#include "iwl-config.h" + +/* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */ + +const char iwl_killer_be1775s_name[] = + "Killer(R) Wi-Fi 7 BE1775s 320MHz Wireless Network Adapter (BE211D2W)"; +const char iwl_killer_be1775i_name[] = + "Killer(R) Wi-Fi 7 BE1775i 320MHz Wireless Network Adapter (BE211NGW)"; + +const char iwl_be211_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz"; +const char iwl_be213_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 59af36960f9c..b2e4d4035296 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,19 +10,15 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 98 +#define IWL_SC_UCODE_API_MAX 99 /* Lowest firmware API version supported */ -#define IWL_SC_UCODE_API_MIN 93 +#define IWL_SC_UCODE_API_MIN 97 /* NVM versions */ #define IWL_SC_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ -#define IWL_SC_DCCM_OFFSET 0x800000 /* LMAC1 */ -#define IWL_SC_DCCM_LEN 0x10000 /* LMAC1 */ -#define IWL_SC_DCCM2_OFFSET 0x880000 -#define IWL_SC_DCCM2_LEN 0x8000 #define IWL_SC_SMEM_OFFSET 0x400000 #define IWL_SC_SMEM_LEN 0xD0000 @@ -43,8 +39,7 @@ #define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \ IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" -static const struct iwl_base_params iwl_sc_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, +static const struct iwl_family_base_params iwl_sc_base = { .num_of_queues = 512, .max_tfd_queue_size = 65536, .shadow_ram_support = true, @@ -53,87 +48,55 @@ static const struct iwl_base_params iwl_sc_base_params = { .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, + .smem_offset = IWL_SC_SMEM_OFFSET, + .smem_len = IWL_SC_SMEM_LEN, + .apmg_not_supported = true, + .mac_addr_from_csr = 0x30, + .min_umac_error_event_table = 0xD0000, + .d3_debug_data_base_addr = 0x401000, + .d3_debug_data_length = 60 * 1024, + .mon_smem_regs = { + .write_ptr = { + .addr = LDBG_M2S_BUF_WPTR, + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, + }, + .cycle_cnt = { + .addr = LDBG_M2S_BUF_WRAP_CNT, + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, + }, + }, + .min_txq_size = 128, + .gp2_reg_addr = 0xd02c68, + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, + .mon_dram_regs = { + .write_ptr = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, + }, + .cycle_cnt = { + .addr = DBGC_DBGBUF_WRAP_AROUND, + .mask = 0xffffffff, + }, + .cur_frag = { + .addr = DBGC_CUR_DBGBUF_STATUS, + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, + }, + }, + .mon_dbgi_regs = { + .write_ptr = { + .addr = DBGI_SRAM_FIFO_POINTERS, + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, + }, + }, + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, + .ucode_api_max = IWL_SC_UCODE_API_MAX, + .ucode_api_min = IWL_SC_UCODE_API_MIN, }; -#define IWL_DEVICE_BZ_COMMON \ - .ucode_api_max = IWL_SC_UCODE_API_MAX, \ - .ucode_api_min = IWL_SC_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_SC_DCCM_OFFSET, \ - .dccm_len = IWL_SC_DCCM_LEN, \ - .dccm2_offset = IWL_SC_DCCM2_OFFSET, \ - .dccm2_len = IWL_SC_DCCM2_LEN, \ - .smem_offset = 
IWL_SC_SMEM_OFFSET, \ - .smem_len = IWL_SC_SMEM_LEN, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x30, \ - .nvm_ver = IWL_SC_NVM_VERSION, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0xD0000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - }, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_SC, \ - .trans.base_params = &iwl_sc_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - }, \ - .mon_dbgi_regs = { \ - .write_ptr = { \ - .addr = DBGI_SRAM_FIFO_POINTERS, \ - .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ - }, \ - } - -#define IWL_DEVICE_SC \ - IWL_DEVICE_BZ_COMMON, \ - .uhb_supported = true, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ - .num_rbds = IWL_NUM_RBDS_SC_EHT, \ - .ht_params = &iwl_bz_ht_params - -/* - * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an - * A-MPDU, with additional overhead to account for processing time. - */ -#define IWL_NUM_RBDS_SC_EHT (512 * 16) - -const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { +const struct iwl_mac_cfg iwl_sc_mac_cfg = { .device_family = IWL_DEVICE_FAMILY_SC, - .base_params = &iwl_sc_base_params, + .base = &iwl_sc_base, .mq_rx_supported = true, - .rf_id = true, .gen2 = true, .integrated = true, .umac_prph_offset = 0x300000, @@ -142,21 +105,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const struct iwl_cfg iwl_cfg_sc = { - .fw_name_mac = "sc", - IWL_DEVICE_SC, -}; - -const struct iwl_cfg iwl_cfg_sc2 = { - .fw_name_mac = "sc2", - IWL_DEVICE_SC, -}; - -const struct iwl_cfg iwl_cfg_sc2f = { - .fw_name_mac = "sc2f", - IWL_DEVICE_SC, -}; - IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX); IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX); MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index a13add556a7b..1ebc7effcc2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation + * Copyright (C) 2005-2014, 2021, 2024-2025 Intel Corporation */ #ifndef __iwl_agn_h__ #define __iwl_agn_h__ @@ -399,7 +399,7 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) * later with iwl_free_nvm_data(). 
*/ struct iwl_nvm_data * -iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const u8 *eeprom, size_t eeprom_size); int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 2ed4b6e798ab..ec94c43ba28c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018, 2025 Intel Corporation *****************************************************************************/ #include <linux/slab.h> @@ -2097,7 +2097,8 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, char buf[40]; const size_t bufsz = sizeof(buf); - if (priv->cfg->ht_params) + /* HT devices also have at least one HT40 band */ + if (priv->cfg->ht_params.ht40_bands) pos += scnprintf(buf + pos, bufsz - pos, "use %s for aggregation\n", (priv->hw_params.use_rts_for_aggregation) ? @@ -2117,7 +2118,8 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file, int buf_size; int rts; - if (!priv->cfg->ht_params) + /* HT devices also have at least one HT40 band */ + if (!priv->cfg->ht_params.ht40_bands) return -EINVAL; memset(buf, 0, sizeof(buf)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 4ac8b862ad41..25b24820466d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014, 2020, 2023 Intel Corporation. All rights reserved. + * Copyright (C) 2025 Intel Corporation *****************************************************************************/ /* * Please use this file (dev.h) for driver implementation definitions. @@ -627,7 +628,7 @@ struct iwl_priv { struct iwl_trans *trans; struct device *dev; /* for debug prints only */ - const struct iwl_cfg *cfg; + const struct iwl_rf_cfg *cfg; const struct iwl_fw *fw; const struct iwl_dvm_cfg *lib; unsigned long status; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index 48a8349680fc..3447ae0b160a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
- * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2025 Intel Corporation *****************************************************************************/ #include <linux/units.h> @@ -481,7 +481,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv) /* NIC configuration for 6000 series */ static void iwl6000_nic_config(struct iwl_priv *priv) { - switch (priv->trans->trans_cfg->device_family) { + switch (priv->trans->mac_cfg->device_family) { case IWL_DEVICE_FAMILY_6005: case IWL_DEVICE_FAMILY_6030: case IWL_DEVICE_FAMILY_6000: diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c index cdc05f7e75a6..2423125e5284 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021, 2024-2025 Intel Corporation */ #include <linux/types.h> #include <linux/slab.h> @@ -151,7 +151,7 @@ static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset) { if (WARN_ON(offset + sizeof(u16) > eeprom_size)) return 0; - return le16_to_cpup((__le16 *)(eeprom + offset)); + return le16_to_cpup((const __le16 *)(eeprom + offset)); } static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size, @@ -204,8 +204,8 @@ static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size, return (address & ADDRESS_MSK) + (offset << 1); } -static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, - u32 offset) +static const void *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, + u32 offset) { u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset); @@ -218,10 +218,9 @@ static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size, static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size, struct iwl_nvm_data *data) { - struct iwl_eeprom_calib_hdr *hdr; + const struct iwl_eeprom_calib_hdr *hdr; - hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_CALIB_ALL); + hdr = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL); if (!hdr) return -ENODATA; data->calib_version = hdr->version; @@ -295,7 +294,7 @@ struct iwl_eeprom_enhanced_txpwr { } __packed; static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data, - struct iwl_eeprom_enhanced_txpwr *txp) + const struct iwl_eeprom_enhanced_txpwr *txp) { s8 result = 0; /* (.5 dBm) */ @@ -329,7 +328,7 @@ static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data, static void iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data, - struct iwl_eeprom_enhanced_txpwr *txp, + const struct iwl_eeprom_enhanced_txpwr *txp, int n_channels, s8 max_txpower_avg) { int ch_idx; @@ -360,20 +359,18 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev, const u8 *eeprom, size_t eeprom_size, int n_channels) { - struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; + const struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; int idx, entries; - __le16 *txp_len; + const __le16 *txp_len; s8 max_txp_avg_halfdbm; BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_TXP_SZ_OFFS); + txp_len = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - 
txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, - EEPROM_TXP_OFFS); + txp_array = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; @@ -416,7 +413,7 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev, } } -static void iwl_init_band_reference(const struct iwl_cfg *cfg, +static void iwl_init_band_reference(const struct iwl_rf_cfg *cfg, const u8 *eeprom, size_t eeprom_size, int eeprom_band, int *eeprom_ch_count, const struct iwl_eeprom_channel **ch_info, @@ -426,7 +423,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg, offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY; - *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset); + *ch_info = iwl_eeprom_query_addr(eeprom, eeprom_size, offset); switch (eeprom_band) { case 1: /* 2.4GHz band */ @@ -510,7 +507,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev, #define CHECK_AND_PRINT_I(x) \ ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "") -static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, +static int iwl_init_channel_map(struct device *dev, const struct iwl_rf_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) { @@ -749,7 +746,7 @@ static int iwl_nvm_is_otp(struct iwl_trans *trans) u32 otpgp; /* OTP only valid for CP/PP and after */ - switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { + switch (trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) { case CSR_HW_REV_TYPE_NONE: IWL_ERR(trans, "Unknown hardware type\n"); return -EIO; @@ -784,7 +781,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans) * CSR auto clock gate disable bit - * this is only applicable for HW with OTP shadow RAM */ - if (trans->trans_cfg->base_params->shadow_ram_support) + if (trans->mac_cfg->base->shadow_ram_support) iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -905,7 +902,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans, } /* more in the link list, continue */ usedblocks++; - } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items); + } while (usedblocks <= trans->mac_cfg->base->max_ll_items); /* OTP has no valid blocks */ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); @@ -938,7 +935,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) if (nvm_is_otp < 0) return nvm_is_otp; - sz = trans->trans_cfg->base_params->eeprom_size; + sz = trans->mac_cfg->base->eeprom_size; IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); e = kmalloc(sz, GFP_KERNEL); @@ -973,7 +970,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); /* traversing the linked list if no shadow ram supported */ - if (!trans->trans_cfg->base_params->shadow_ram_support) { + if (!trans->mac_cfg->base->shadow_ram_support) { ret = iwl_find_otp_image(trans, &validblockaddr); if (ret) goto err_unlock; @@ -1027,7 +1024,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) return ret; } -static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, +static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) { @@ -1062,7 +1059,7 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, /* EEPROM data functions */ struct iwl_nvm_data * 
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const u8 *eeprom, size_t eeprom_size) { struct iwl_nvm_data *data; @@ -1098,14 +1095,14 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, EEPROM_RAW_TEMPERATURE); if (!tmp) goto err_free; - data->raw_temperature = *(__le16 *)tmp; + data->raw_temperature = *(const __le16 *)tmp; tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_KELVIN_TEMPERATURE); if (!tmp) goto err_free; - data->kelvin_temperature = *(__le16 *)tmp; - data->kelvin_voltage = *((__le16 *)tmp + 1); + data->kelvin_temperature = *(const __le16 *)tmp; + data->kelvin_voltage = *((const __le16 *)tmp + 1); radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 5ca85d90a8d6..cec2ebdfd651 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2025 Intel Corporation *****************************************************************************/ @@ -116,9 +116,9 @@ static int iwl_led_cmd(struct iwl_priv *priv, } led_cmd.on = iwl_blink_compensation(priv, on, - priv->trans->trans_cfg->base_params->led_compensation); + priv->trans->mac_cfg->base->led_compensation); led_cmd.off = iwl_blink_compensation(priv, off, - priv->trans->trans_cfg->base_params->led_compensation); + priv->trans->mac_cfg->base->led_compensation); ret = iwl_send_led_cmd(priv, &led_cmd); if (!ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 56d19a034c24..0771a46bd552 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation + * Copyright(C) 2018 - 2019, 2022 - 2025 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -96,7 +96,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, WANT_MONITOR_VIF); - if (priv->trans->max_skb_frags) + if (priv->trans->info.max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; @@ -188,7 +188,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->nvm_data->bands[NL80211_BAND_5GHZ]; - hw->wiphy->hw_version = priv->trans->hw_id; + hw->wiphy->hw_version = priv->trans->info.hw_id; iwl_leds_init(priv); @@ -549,7 +549,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) iwlagn_prepare_restart(priv); - memset((void *)&ctx->active, 0, sizeof(ctx->active)); + memset((void *)(uintptr_t)&ctx->active, 0, sizeof(ctx->active)); iwl_connection_init_rx_config(priv, ctx); iwlagn_set_rxon_chain(priv, ctx); @@ -1091,7 +1091,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto done; } - scd_queues = BIT(priv->trans->trans_cfg->base_params->num_of_queues) - 1; + scd_queues = BIT(priv->trans->mac_cfg->base->num_of_queues) - 1; scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index a27a72cc017a..1d619384c629 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved. - * Copyright(c) 2024 Intel Corporation. All rights reserved. + * Copyright(c) 2024-2025 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well @@ -824,11 +824,11 @@ int iwl_alive_start(struct iwl_priv *priv) iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant); if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { - struct iwl_rxon_cmd *active_rxon = - (struct iwl_rxon_cmd *)&ctx->active; + struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active; + /* apply any changes in staging */ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + active->filter_flags &= ~RXON_FILTER_ASSOC_MSK; } else { struct iwl_rxon_context *tmp; /* Initialize our rx_config data */ @@ -1137,9 +1137,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv) static void iwl_set_hw_params(struct iwl_priv *priv) { - if (priv->cfg->ht_params) + /* there are no devices with HT but without HT40 on all bands */ + if (priv->cfg->ht_params.ht40_bands) priv->hw_params.use_rts_for_aggregation = - priv->cfg->ht_params->use_rts_for_aggregation; + priv->cfg->ht_params.use_rts_for_aggregation; /* Device-specific setup */ priv->lib->set_hw_params(priv); @@ -1173,8 +1174,9 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) { struct iwl_nvm_data *data = priv->nvm_data; + /* all HT devices also have HT40 on at least one band */ if (data->sku_cap_11n_enable && - !priv->cfg->ht_params) { + !priv->cfg->ht_params.ht40_bands) { IWL_ERR(priv, "Invalid 11n configuration\n"); return -EINVAL; } @@ -1224,7 +1226,7 @@ static int iwl_nvm_check_version(struct iwl_nvm_data *data, } static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, - const struct iwl_cfg *cfg, + const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { @@ -1233,7 +1235,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, struct iwl_op_mode *op_mode; u16 num_mac; u32 ucode_flags; - struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { REPLY_RX_PHY_CMD, REPLY_RX_MPDU_CMD, @@ -1248,7 +1249,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, ************************/ hw = iwl_alloc_all(); if (!hw) { - pr_err("%s: Cannot allocate network device\n", trans->name); + pr_err("%s: Cannot allocate network device\n", + trans->info.name); err = -ENOMEM; goto out; } @@ -1261,7 +1263,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, priv->cfg = cfg; priv->fw = fw; - switch (priv->trans->trans_cfg->device_family) { + switch (priv->trans->mac_cfg->device_family) { case IWL_DEVICE_FAMILY_1000: case IWL_DEVICE_FAMILY_100: priv->lib = &iwl_dvm_1000_cfg; @@ -1309,52 +1311,50 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * Populate the state variables that the transport layer needs * to know about. 
*/ - trans_cfg.op_mode = op_mode; - trans_cfg.no_reclaim_cmds = no_reclaim_cmds; - trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + BUILD_BUG_ON(sizeof(no_reclaim_cmds) > + sizeof(trans->conf.no_reclaim_cmds)); + memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds, + sizeof(no_reclaim_cmds)); switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: case IWL_AMSDU_4K: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; + trans->conf.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_8K: - trans_cfg.rx_buf_size = IWL_AMSDU_8K; + trans->conf.rx_buf_size = IWL_AMSDU_8K; break; case IWL_AMSDU_12K: default: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; + trans->conf.rx_buf_size = IWL_AMSDU_4K; pr_err("Unsupported amsdu_size: %d\n", iwlwifi_mod_params.amsdu_size); } - trans_cfg.command_groups = iwl_dvm_groups; - trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); + trans->conf.command_groups = iwl_dvm_groups; + trans->conf.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); - trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; - trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, - driver_data[2]); + trans->conf.cmd_fifo = IWLAGN_CMD_FIFO_NUM; + trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info, + driver_data[2]); WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < - priv->trans->trans_cfg->base_params->num_of_queues); + priv->trans->mac_cfg->base->num_of_queues); ucode_flags = fw->ucode_capa.flags; if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; - trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; + trans->conf.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; } else { priv->sta_key_max_num = STA_KEY_MAX_NUM; - trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; } - /* Configure transport layer */ - iwl_trans_configure(priv->trans, &trans_cfg); + trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - trans->command_groups = trans_cfg.command_groups; - trans->command_groups_size = trans_cfg.command_groups_size; + iwl_trans_op_mode_enter(priv->trans, op_mode); /* At this point both hw and priv are allocated. */ @@ -1378,18 +1378,18 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * 2. Read REV register ***********************/ IWL_INFO(priv, "Detected %s, REV=0x%X\n", - priv->trans->name, priv->trans->hw_rev); + priv->trans->info.name, priv->trans->info.hw_rev); err = iwl_trans_start_hw(priv->trans); if (err) - goto out_free_hw; + goto out_leave_trans; /* Read the EEPROM */ err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob, &priv->eeprom_blob_size); if (err) { IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_free_hw; + goto out_leave_trans; } /* Reset chip to save power until we load uCode during "up". 
*/ @@ -1437,10 +1437,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, * packaging bug or due to the eeprom check above */ priv->sta_key_max_num = STA_KEY_MAX_NUM; - trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; - - /* Configure transport layer again*/ - iwl_trans_configure(priv->trans, &trans_cfg); + trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; } /******************* @@ -1508,6 +1505,8 @@ out_free_eeprom_blob: kfree(priv->eeprom_blob); out_free_eeprom: kfree(priv->nvm_data); +out_leave_trans: + iwl_trans_op_mode_leave(priv->trans); out_free_hw: ieee80211_free_hw(priv->hw); out: @@ -1992,7 +1991,7 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode) /* SKU Control */ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH, - CSR_HW_REV_STEP_DASH(priv->trans->hw_rev)); + CSR_HW_REV_STEP_DASH(priv->trans->info.hw_rev)); /* write radio config values to register */ if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) { diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c index 6d16a7105656..6b42d6e5f30f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019, 2025 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -64,32 +64,32 @@ struct iwl_power_vec_entry { /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ /* DTIM 0 - 2 */ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { - {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2} + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF), 0}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF), 0}, 2} }; /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ /* DTIM 3 - 10 */ static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { - {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2} + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4), 0}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10), 0}, 1}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10), 0}, 2} }; /* for DTIM period > IWL_DTIM_RANGE_1_MAX */ /* DTIM 11 - */ static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { - {{SLP, 
SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, - {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, - {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, - {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} + {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0}, + {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF), 0}, 0} }; /* advance power management */ @@ -196,7 +196,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, else cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; - if (priv->trans->trans_cfg->base_params->shadow_reg_enable) + if (priv->trans->mac_cfg->base->shadow_reg_enable) cmd->flags |= IWL_POWER_SHADOW_REG_ENA; else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 7f67e602940c..5f8b60824043 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -3,7 +3,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018, 2020-2021 Intel Corporation + * Copyright(c) 2018, 2020-2021, 2025 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portionhelp of the ieee80211 subsystem header files. @@ -50,7 +50,7 @@ static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) * See iwlagn_mac_channel_switch. */ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_rxon_cmd *rxon = (void *)&ctx->active; + struct iwl_rxon_cmd *rxon = (void *)(uintptr_t)&ctx->active; if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) return; @@ -643,8 +643,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, fraglen = len - hdrlen; if (fraglen) { - int offset = (void *)hdr + hdrlen - - rxb_addr(rxb) + rxb_offset(rxb); + int offset = (u8 *)hdr + hdrlen - + (u8 *)rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index f80cce37e2c0..2d3c1627f283 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2025 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Deutschland GmbH *****************************************************************************/ @@ -341,7 +341,7 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; - struct iwl_rxon_cmd *active = (void *)&ctx->active; + struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active; if (ctx->ctxid == IWL_RXON_CTX_BSS) { ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); @@ -441,7 +441,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; - struct iwl_rxon_cmd *active = (void *)&ctx->active; + struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active; /* RXON timing must be before associated RXON */ if (ctx->ctxid == IWL_RXON_CTX_BSS) { @@ -1023,7 +1023,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv, int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { /* cast away the const for active_rxon in this function */ - struct iwl_rxon_cmd *active = (void *)&ctx->active; + struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active; bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 111ed1873006..24fefa0e8148 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -3,7 +3,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2019 Intel Corporation - * Copyright (C) 2023 Intel Corporation + * Copyright (C) 2023, 2025 Intel Corporation *****************************************************************************/ #include <linux/kernel.h> @@ -463,7 +463,7 @@ static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) int q; for (q = IWLAGN_FIRST_AMPDU_QUEUE; - q < priv->trans->trans_cfg->base_params->num_of_queues; q++) { + q < priv->trans->mac_cfg->base->num_of_queues; q++) { if (!test_and_set_bit(q, priv->agg_q_alloc)) { priv->queue_to_mac80211[q] = mq; return q; @@ -1277,7 +1277,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); - if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) { + if (scd_flow >= priv->trans->mac_cfg->base->num_of_queues) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); return; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c index bb13ca5d666c..ac90191a3973 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c @@ -3,6 +3,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2015 Intel Deutschland GmbH + * Copyright (C) 2025 Intel Corporation *****************************************************************************/ #include <linux/kernel.h> @@ -222,7 +223,7 @@ static int iwl_alive_notify(struct iwl_priv *priv) int ret; int i; - iwl_trans_fw_alive(priv->trans, 0); + iwl_trans_fw_alive(priv->trans); if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN && priv->nvm_data->sku_cap_ipan_enable) { @@ -293,15 +294,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, { struct iwl_notification_wait alive_wait; struct iwl_alive_data alive_data; - const struct fw_img *fw; int ret; enum iwl_ucode_type old_type; static const u16 alive_cmd[] = { REPLY_ALIVE }; - fw = iwl_get_ucode_image(priv->fw, ucode_type); - if (WARN_ON(!fw)) - return -EINVAL; - old_type = priv->cur_ucode; priv->cur_ucode = ucode_type; priv->ucode_loaded = false; @@ -310,7 +306,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); - ret = iwl_trans_start_fw(priv->trans, fw, false); + ret = iwl_trans_start_fw(priv->trans, priv->fw, ucode_type, false); if (ret) { priv->cur_ucode = old_type; iwl_remove_notification(&priv->notif_wait, &alive_wait); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index efa7b673ebc7..bee7d92293b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2024 Intel Corporation + * Copyright (C) 2019-2025 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -847,12 +847,12 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) if (IS_ERR(data)) return PTR_ERR(data); - /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */ + /* try to read ppag table rev 1 to 4 (all have the same data size) */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { - if (tbl_rev >= 1 && tbl_rev <= 3) { + if (tbl_rev >= 1 && tbl_rev <= 4) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; IWL_DEBUG_RADIO(fwrt, "Reading PPAG table (tbl_rev=%d)\n", @@ -882,7 +882,7 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) goto out_free; read_table: - fwrt->ppag_ver = tbl_rev; + fwrt->ppag_bios_rev = tbl_rev; flags = &wifi_pkg->package.elements[1]; if (flags->type != ACPI_TYPE_INTEGER) { @@ -891,7 +891,7 @@ read_table: } fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value, - fwrt->ppag_ver); + fwrt->ppag_bios_rev); /* * read, verify gain values and save them into the PPAG table. 
@@ -912,6 +912,7 @@ read_table: } } + fwrt->ppag_bios_source = BIOS_SOURCE_ACPI; ret = 0; out_free: @@ -919,40 +920,39 @@ out_free: return ret; } -void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, - struct iwl_phy_specific_cfg *filters) +int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt) { + struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters; struct iwl_phy_specific_cfg tmp = {}; - union acpi_object *wifi_pkg, *data; + union acpi_object *wifi_pkg, *data __free(kfree); int tbl_rev, i; data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD); if (IS_ERR(data)) - return; + return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WPFC_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) - goto out_free; + return PTR_ERR(wifi_pkg); if (tbl_rev != 0) - goto out_free; + return -EINVAL; BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE - 1); for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER) - goto out_free; + return -EINVAL; tmp.filter_cfg_chains[i] = cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value); } IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n"); *filters = tmp; -out_free: - kfree(data); + return 0; } IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index e50b93472dd2..68d8fb5f6357 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2023, 2025 Intel Corporation */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ @@ -180,8 +180,7 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt, int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt); -void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, - struct iwl_phy_specific_cfg *filters); +int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt); void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt); @@ -244,8 +243,10 @@ static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) return -ENOENT; } -/* macro since the second argument doesn't always exist */ -#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0) +static inline int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 42360a8f23aa..3ce477c248ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -112,6 +112,16 @@ struct iwl_alive_ntf_v6 { struct iwl_imr_alive_info imr; } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */ +struct iwl_alive_ntf { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; + struct iwl_sku_id sku_id; + struct iwl_imr_alive_info imr; + __le64 platform_id; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_8 */ + /** * enum iwl_extended_cfg_flags - commands driver may send before * finishing init flow diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index d43adb6f9220..1c86a858aaab 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022, 2024 Intel Corporation + * Copyright (C) 2018-2022, 2024-2025 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ @@ -145,8 +145,8 @@ enum iwl_legacy_cmds { REMOVE_STA = 0x19, /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or - * &struct iwl_tx_cmd_gen3, + * @TX_CMD: uses &struct iwl_tx_cmd_v6 or &struct iwl_tx_cmd_v9 or + * &struct iwl_tx_cmd, * response in &struct iwl_tx_resp or * &struct iwl_tx_resp_v3 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index c139b965980d..9c88bb280609 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -98,7 +98,7 @@ enum iwl_data_path_subcmd_ids { /** * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode, - * uses &struct iwl_esr_mode_notif + * uses &struct iwl_esr_mode_notif or &struct iwl_esr_mode_notif_v1 */ ESR_MODE_NOTIF = 0xF3, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index e1952fc6d149..33541f92c7c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -6,6 +6,11 @@ */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ +#include <linux/ieee80211.h> +#include <linux/if_ether.h> +#include <linux/types.h> +#include <linux/bits.h> +#include "rs.h" /** * enum iwl_location_subcmd_ids - location group command IDs @@ -1609,7 +1614,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */ /** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * struct iwl_tof_range_rsp_ap_entry_ntfy_v7 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. @@ -1645,7 +1650,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { * @tx_pn: the last PN used for this responder Tx in case PMF is configured in * LE byte order. */ -struct iwl_tof_range_rsp_ap_entry_ntfy { +struct iwl_tof_range_rsp_ap_entry_ntfy_v7 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; @@ -1672,6 +1677,65 @@ struct iwl_tof_range_rsp_ap_entry_ntfy { } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6, LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */ +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @bssid: BSSID of the AP + * @measure_status: current APs measurement status, one of + * &enum iwl_tof_entry_status. 
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [pSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [pSec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @last_burst: 1 if no more FTM sessions are scheduled for this responder + * @refusal_period: refusal period in case of + * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + * @start_tsf: measurement start time in TSF of the mac specified in the range + * request + * @reserved1: reserved, for backwards compatibility + * @t2t3_initiator: as calculated from the algo in the initiator + * @t1t4_responder: as calculated from the algo in the responder + * @common_calib: Calib val that was used in for this AP measurement + * @specific_calib: val that was used in for this AP measurement + * @papd_calib_output: The result of the tof papd calibration that was injected + * into the algorithm. + * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy. + * @reserved: for alignment + * @rx_pn: the last PN used for this responder Rx in case PMF is configured in + * LE byte order. + * @tx_pn: the last PN used for this responder Tx in case PMF is configured in + * LE byte order. + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 reserved1[2]; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; + u8 rttConfidence; + u8 reserved[3]; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; +} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_8 */ /** * enum iwl_tof_response_status - tof response status @@ -1739,6 +1803,24 @@ struct iwl_tof_range_rsp_ntfy_v7 { } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */ /** + * struct iwl_tof_range_rsp_ntfy_v9 - ranging response notification + * @request_id: A Token ID of the corresponding Range request + * @num_of_aps: Number of APs results + * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. 
+ * @reserved: reserved + * @ap: per-AP data + */ +struct iwl_tof_range_rsp_ntfy_v9 { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy_v7 ap[IWL_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8, + * LOCATION_RANGE_RSP_NTFY_API_S_VER_9 + */ + +/** * struct iwl_tof_range_rsp_ntfy - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results @@ -1752,8 +1834,7 @@ struct iwl_tof_range_rsp_ntfy { u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS]; -} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8, - LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */ +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_10 */ #define IWL_MVM_TOF_MCSI_BUF_SIZE (245) /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index b511e3aa6bb2..b9f559dac39f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -311,8 +311,41 @@ enum iwl_mac_config_filter_flags { }; /* MAC_FILTER_FLAGS_MASK_E_VER_1 */ /** + * struct iwl_mac_wifi_gen_support_v2 - parameters of iwl_mac_config_cmd + * with support up to eht as in version 2 of the command + * + * @he_support: does this MAC support HE + * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling + * @eht_support: does this MAC support EHT. Requires he_support + */ +struct iwl_mac_wifi_gen_support_v2 { + __le16 he_support; + __le16 he_ap_support; + __le32 eht_support; +} __packed; + +/** + * struct iwl_mac_wifi_gen_support - parameters of iwl_mac_config_cmd + * with support up to uhr as in version 3 of the command + * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) + * + * @he_support: does this MAC support HE + * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling + * @eht_support: does this MAC support EHT. Requires he_support + * @uhr_support: does this MAC support UHR. Requires eht_support + * @reserved: reserved for alignment and to match version 2's size + */ +struct iwl_mac_wifi_gen_support { + u8 he_support; + u8 he_ap_support; + u8 eht_support; + u8 uhr_support; + __le32 reserved; +} __packed; + +/** * struct iwl_mac_config_cmd - command structure to configure MAC contexts in - * MLD API + * MLD API for versions 2 and 3 * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) * * @id_and_color: ID and color of the MAC @@ -321,9 +354,8 @@ enum iwl_mac_config_filter_flags { * @local_mld_addr: mld address * @reserved_for_local_mld_addr: reserved * @filter_flags: combination of &enum iwl_mac_config_filter_flags - * @he_support: does this MAC support HE - * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling - * @eht_support: does this MAC support EHT. Requires he_support + * @wifi_gen_v2: he/eht parameters as in cmd version 2 + * @wifi_gen: he/eht/uhr parameters as in cmd version 3 * @nic_not_ack_enabled: mark that the NIC doesn't support receiving * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). 
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 @@ -332,7 +364,6 @@ enum iwl_mac_config_filter_flags { * @p2p_dev: mac data for p2p device */ struct iwl_mac_config_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* MAC_CONTEXT_TYPE_API_E */ @@ -340,16 +371,17 @@ struct iwl_mac_config_cmd { u8 local_mld_addr[6]; __le16 reserved_for_local_mld_addr; __le32 filter_flags; - __le16 he_support; - __le16 he_ap_support; - __le32 eht_support; + union { + struct iwl_mac_wifi_gen_support_v2 wifi_gen_v2; + struct iwl_mac_wifi_gen_support wifi_gen; + }; __le32 nic_not_ack_enabled; /* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_2 */ union { struct iwl_mac_client_data client; struct iwl_mac_p2p_dev_data p2p_dev; }; -} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2 */ +} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2_VER_3 */ /** * enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being @@ -457,6 +489,24 @@ enum iwl_link_modify_bandwidth { }; /** + * struct iwl_npca_params - NPCA parameters (non-primary channel access) + * + * @switch_delay: after switch, delay TX according to destination AP + * @switch_back_delay: switch back to control channel before OBSS frame end + * @min_dur_threshold: minimum PPDU time to switch to the non-primary + * NPCA channel + * @flags: NPCA flags - bit 0: puncturing allowed, bit 1: new TX allowed + * @reserved: reserved for alignment purposes + */ +struct iwl_npca_params { + u8 switch_delay; + u8 switch_back_delay; + __le16 min_dur_threshold; + __le16 flags; + __le16 reserved; +} __packed; /* NPCA_PARAM_API_S_VER_1 */ + +/** * struct iwl_link_config_cmd - command structure to configure the LINK context * in MLD API * ( LINK_CONFIG_CMD =0x9 ) @@ -513,6 +563,8 @@ enum iwl_link_modify_bandwidth { * IEEE802.11REVme-D5.0 * @ibss_bssid_addr: bssid for ibss * @reserved_for_ibss_bssid_addr: reserved + * @npca_params: NPCA parameters + * @prio_edca_params: priority EDCA parameters for enhanced QoS * @reserved3: reserved for future use */ struct iwl_link_config_cmd { @@ -560,8 +612,10 @@ struct iwl_link_config_cmd { u8 ul_mu_data_disable; u8 ibss_bssid_addr[6]; __le16 reserved_for_ibss_bssid_addr; - __le32 reserved3[8]; -} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6 */ + struct iwl_npca_params npca_params; /* since _VER_7 */ + struct iwl_ac_qos prio_edca_params; /* since _VER_7 */ + __le32 reserved3[4]; +} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6, _VER_7 */ /* Currently FW supports link ids in the range 0-3 and can have * at most two active links for each vif. @@ -589,6 +643,62 @@ enum iwl_fw_sta_type { }; /* STATION_TYPE_E_VER_1 */ /** + * struct iwl_sta_cfg_cmd_v1 - cmd structure to add a peer sta to the uCode's + * station table + * ( STA_CONFIG_CMD = 0xA ) + * + * @sta_id: index of station in uCode's station table + * @link_id: the id of the link that is used to communicate with this sta + * @peer_mld_address: the peers mld address + * @reserved_for_peer_mld_address: reserved + * @peer_link_address: the address of the link that is used to communicate + * with this sta + * @reserved_for_peer_link_address: reserved + * @station_type: type of this station. See &enum iwl_fw_sta_type + * @assoc_id: for GO only + * @beamform_flags: beam forming controls + * @mfp: indicates whether the STA uses management frame protection or not. 
+ * @mimo: indicates whether the sta uses mimo or not + * @mimo_protection: indicates whether the sta uses mimo protection or not + * @ack_enabled: indicates that the AP supports receiving ACK- + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG + * @trig_rnd_alloc: indicates that trigger based random allocation + * is enabled according to UORA element existence + * @tx_ampdu_spacing: minimum A-MPDU spacing: + * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density + * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K, + * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K. + * @sp_length: the size of the SP in actual number of frames + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs. + * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY + * capa + * @htc_flags: which features are supported in HTC + */ +struct iwl_sta_cfg_cmd_v1 { + __le32 sta_id; + __le32 link_id; + u8 peer_mld_address[ETH_ALEN]; + __le16 reserved_for_peer_mld_address; + u8 peer_link_address[ETH_ALEN]; + __le16 reserved_for_peer_link_address; + __le32 station_type; + __le32 assoc_id; + __le32 beamform_flags; + __le32 mfp; + __le32 mimo; + __le32 mimo_protection; + __le32 ack_enabled; + __le32 trig_rnd_alloc; + __le32 tx_ampdu_spacing; + __le32 tx_ampdu_max_size; + __le32 sp_length; + __le32 uapsd_acs; + struct iwl_he_pkt_ext_v2 pkt_ext; + __le32 htc_flags; +} __packed; /* STA_CMD_API_S_VER_1 */ + +/** * struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's * station table * ( STA_CONFIG_CMD = 0xA ) @@ -620,6 +730,14 @@ enum iwl_fw_sta_type { * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY * capa * @htc_flags: which features are supported in HTC + * @use_ldpc_x2_cw: Indicates whether to use LDPC with double CW + * @use_icf: Indicates whether to use ICF instead of RTS + * @dps_pad_time: DPS (Dynamic Power Save) padding delay resolution to ensure + * proper timing alignment + * @dps_trans_delay: DPS minimal time that takes the peer to return to low power + * @mic_prep_pad_delay: MIC prep time padding + * @mic_compute_pad_delay: MIC compute time padding + * @reserved: Reserved for alignment */ struct iwl_sta_cfg_cmd { __le32 sta_id; @@ -642,7 +760,14 @@ struct iwl_sta_cfg_cmd { __le32 uapsd_acs; struct iwl_he_pkt_ext_v2 pkt_ext; __le32 htc_flags; -} __packed; /* STA_CMD_API_S_VER_1 */ + u8 use_ldpc_x2_cw; + u8 use_icf; + u8 dps_pad_time; + u8 dps_trans_delay; + u8 mic_prep_pad_delay; + u8 mic_compute_pad_delay; + u8 reserved[2]; +} __packed; /* STA_CMD_API_S_VER_2 */ /** * struct iwl_aux_sta_cmd - command for AUX STA configuration @@ -686,9 +811,9 @@ struct iwl_mvm_sta_disable_tx_cmd { /** * enum iwl_mvm_fw_esr_recommendation - FW recommendation code - * @ESR_RECOMMEND_LEAVE: recommendation to leave esr - * @ESR_FORCE_LEAVE: force exiting esr - * @ESR_RECOMMEND_ENTER: recommendation to enter esr + * @ESR_RECOMMEND_LEAVE: recommendation to leave EMLSR + * @ESR_FORCE_LEAVE: force exiting EMLSR + * @ESR_RECOMMEND_ENTER: recommendation to enter EMLSR */ enum iwl_mvm_fw_esr_recommendation { ESR_RECOMMEND_LEAVE, @@ -697,15 +822,46 @@ enum iwl_mvm_fw_esr_recommendation { }; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */ /** - * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode + * struct iwl_esr_mode_notif_v1 - FW recommendation/force for EMLSR mode * - * @action: the action to apply on esr state. 
See &iwl_mvm_fw_esr_recommendation + * @action: the action to apply on EMLSR state. + * See &iwl_mvm_fw_esr_recommendation */ -struct iwl_esr_mode_notif { +struct iwl_esr_mode_notif_v1 { __le32 action; } __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */ /** + * enum iwl_esr_leave_reason - reasons for leaving EMLSR mode + * + * @ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED: OMI MU UL disallowed + * @ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA: No trigger for EMLSR station + * @ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL: No EMLSR station in MU DL + * @ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH: Bad activation frame threshold + * @ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN: RTS in dual listen + */ +enum iwl_esr_leave_reason { + ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED = BIT(0), + ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA = BIT(1), + ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL = BIT(2), + ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH = BIT(3), + ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN = BIT(4), +}; + +/** + * struct iwl_esr_mode_notif - FW recommendation/force for EMLSR mode + * + * @action: the action to apply on EMLSR state. + * See &iwl_mvm_fw_esr_recommendation + * @leave_reason_mask: mask for various reasons to leave EMLSR mode. + * See &iwl_esr_leave_reason + */ +struct iwl_esr_mode_notif { + __le32 action; + __le32 leave_reason_mask; +} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_2 */ + +/** * struct iwl_missed_beacons_notif - sent when by the firmware upon beacon loss * ( MISSED_BEACONS_NOTIF = 0xF6 ) * @link_id: fw link ID diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 26301c0b06a1..2a174c00b712 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ @@ -287,9 +287,9 @@ struct iwl_ac_qos { __le16 cw_min; __le16 cw_max; u8 aifsn; - u8 fifos_mask; + u8 fifos_mask; /* not in use since _VER_3 */ __le16 edca_txop; -} __packed; /* AC_QOS_API_S_VER_2 */ +} __packed; /* AC_QOS_API_S_VER_2, _VER_3 */ /** * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 1238c23ac48b..23140205ccb9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -7,6 +7,8 @@ #ifndef __iwl_fw_api_power_h__ #define __iwl_fw_api_power_h__ +#include "nvm-reg.h" + /* Power Management Commands, Responses, Notifications */ /** @@ -629,28 +631,37 @@ enum iwl_ppag_flags { /** * union iwl_ppag_table_cmd - union for all versions of PPAG command - * @v1: version 1 - * @v2: version 2 - * version 3, 4, 5 and 6 are the same structure as v2, + * @v1: command version 1 structure. + * @v2: command version from 2 to 6 are same structure as v2. * but has a different format of the flags bitmap + * @v3: command version 7 structure. 
* @v1.flags: values from &enum iwl_ppag_flags * @v1.gain: table of antenna gain values per chain and sub-band * @v1.reserved: reserved * @v2.flags: values from &enum iwl_ppag_flags * @v2.gain: table of antenna gain values per chain and sub-band - * @v2.reserved: reserved + * @v3.ppag_config_info: see @struct bios_value_u32 + * @v3.gain: table of antenna gain values per chain and sub-band + * @v3.reserved: reserved */ union iwl_ppag_table_cmd { struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; s8 reserved[2]; - } v1; + } __packed v1; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_1 */ struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; s8 reserved[2]; - } v2; + } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4, + * VER5, VER6 + */ + struct { + struct bios_value_u32 ppag_config_info; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + s8 reserved[2]; + } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */ } __packed; #define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK) @@ -658,6 +669,15 @@ union iwl_ppag_table_cmd { IWL_PPAG_ETSI_LPI_UHB_MASK | \ IWL_PPAG_USA_LPI_UHB_MASK) +#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \ + IWL_PPAG_ETSI_VLP_UHB_MASK | \ + IWL_PPAG_ETSI_SP_UHB_MASK | \ + IWL_PPAG_USA_VLP_UHB_MASK | \ + IWL_PPAG_USA_SP_UHB_MASK | \ + IWL_PPAG_CANADA_LPI_UHB_MASK | \ + IWL_PPAG_CANADA_VLP_UHB_MASK | \ + IWL_PPAG_CANADA_SP_UHB_MASK) + #define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26 #define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index c2f806cbab59..3222cbcbe1ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rs_h__ #define __iwl_fw_api_rs_h__ - +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bits.h> #include "mac.h" /** @@ -213,7 +215,8 @@ enum iwl_tlc_update_flags { * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported - * @rate: current initial rate + * @rate: current initial rate, format depends on the notification + * version * @amsdu_size: Max AMSDU size, in bytes * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ @@ -224,7 +227,7 @@ struct iwl_tlc_update_notif { __le32 rate; __le32 amsdu_size; __le32 amsdu_enabled; -} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ +} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2, _VER_3, _VER_4 */ /** * enum iwl_tlc_debug_types - debug options @@ -427,6 +430,7 @@ enum { /* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ #define RATE_VHT_MCS_RATE_CODE_MSK 0xf +#define RATE_VHT_MCS_NSS_MSK 0x30 /* * Legacy OFDM rate format for bits 7:0 @@ -541,7 +545,7 @@ enum { #define RATE_MCS_CTS_REQUIRED_POS (31) #define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS) -/* rate_n_flags bit field version 2 +/* rate_n_flags bit field version 2 and 3 * * The 32-bit value has different layouts in the low 8 bits depending on the * format. 
There are three formats, HT, VHT and legacy (11abg, with subformats @@ -553,23 +557,25 @@ enum { * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT) * (3) Very High-throughput (VHT) (4) High-efficiency (HE) * (5) Extremely High-throughput (EHT) + * (6) Ultra High Reliability (UHR) (v3 rate format only) */ #define RATE_MCS_MOD_TYPE_POS 8 #define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS) -#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_CCK (0 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_LEGACY_OFDM (1 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_HT (2 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_VHT (3 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_HE (4 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_EHT (5 << RATE_MCS_MOD_TYPE_POS) +#define RATE_MCS_MOD_TYPE_UHR (6 << RATE_MCS_MOD_TYPE_POS) /* * Legacy CCK rate format for bits 0:3: * - * (0) 0xa - 1 Mbps - * (1) 0x14 - 2 Mbps - * (2) 0x37 - 5.5 Mbps - * (3) 0x6e - 11 nbps + * (0) 1 Mbps + * (1) 2 Mbps + * (2) 5.5 Mbps + * (3) 11 Mbps * * Legacy OFDM rate format for bis 3:0: * @@ -586,15 +592,19 @@ enum { #define RATE_LEGACY_RATE_MSK 0x7 /* - * HT, VHT, HE, EHT rate format for bits 3:0 - * 3-0: MCS - * + * HT, VHT, HE, EHT, UHR rate format + * Version 2: (not applicable for UHR) + * 3-0: MCS + * 4: NSS==2 indicator + * Version 3: + * 4-0: MCS + * 5: NSS==2 indicator */ #define RATE_HT_MCS_CODE_MSK 0x7 -#define RATE_MCS_NSS_POS 4 -#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS) -#define RATE_MCS_CODE_MSK 0xf -#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \ +#define RATE_MCS_NSS_MSK_V2 0x10 +#define RATE_MCS_NSS_MSK 0x20 +#define RATE_MCS_CODE_MSK 0x1f +#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 2) | \ ((r) & RATE_HT_MCS_CODE_MSK)) /* Bits 7-5: reserved */ @@ -810,11 +820,38 @@ struct iwl_lq_cmd { }; /* LINK_QUALITY_CMD_API_S_VER_1 */ u8 iwl_fw_rate_idx_to_plcp(int idx); -u32 iwl_new_rate_from_v1(u32 rate_v1); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); const char *iwl_rs_pretty_ant(u8 ant); const char *iwl_rs_pretty_bw(int bw); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); bool iwl_he_is_sgi(u32 rate_n_flags); +static inline u32 iwl_v3_rate_from_v2_v3(__le32 rate, bool fw_v3) +{ + u32 val; + + if (fw_v3) + return le32_to_cpu(rate); + + val = le32_to_cpu(rate) & ~RATE_MCS_NSS_MSK_V2; + val |= u32_encode_bits(le32_get_bits(rate, RATE_MCS_NSS_MSK_V2), + RATE_MCS_NSS_MSK); + + return val; +} + +static inline __le32 iwl_v3_rate_to_v2_v3(u32 rate, bool fw_v3) +{ + __le32 val; + + if (fw_v3) + return cpu_to_le32(rate); + + val = cpu_to_le32(rate & ~RATE_MCS_NSS_MSK); + val |= le32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK), + RATE_MCS_NSS_MSK_V2); + + return val; +} + #endif /* __iwl_fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 42ddd03c1c65..7cf6d6ac7430 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -640,7 +640,9 @@ struct iwl_rx_mpdu_desc_v3 { */ __le32 reserved[1]; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, - RX_MPDU_RES_START_API_S_VER_5 */ + * 
RX_MPDU_RES_START_API_S_VER_5, + * RX_MPDU_RES_START_API_S_VER_6 + */ /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor @@ -724,8 +726,10 @@ struct iwl_rx_mpdu_desc { struct iwl_rx_mpdu_desc_v3 v3; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, - RX_MPDU_RES_START_API_S_VER_4, - RX_MPDU_RES_START_API_S_VER_5 */ + * RX_MPDU_RES_START_API_S_VER_4, + * RX_MPDU_RES_START_API_S_VER_5, + * RX_MPDU_RES_START_API_S_VER_6 + */ #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) @@ -821,7 +825,7 @@ struct iwl_rx_no_data { * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel * @on_air_rise_time: GP2 during on air rise * @fr_time: frame time - * @rate: rate/mcs of frame + * @rate: rate/mcs of frame, format depends on the notification version * @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT @@ -837,9 +841,7 @@ struct iwl_rx_no_data_ver_3 { __le32 rate; __le32 phy_info[2]; __le32 rx_vec[4]; -} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, - RX_NO_DATA_NTFY_API_S_VER_2 - RX_NO_DATA_NTFY_API_S_VER_3 */ +} __packed; /* RX_NO_DATA_NTFY_API_S_VER_3, _VER_4 */ struct iwl_frame_release { u8 baid; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index cfa6532a3cdd..58d5a6ef633e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2024-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -50,7 +50,7 @@ struct iwl_tdls_channel_switch_timing { */ struct iwl_tdls_channel_switch_frame { __le32 switch_time_offset; - struct iwl_tx_cmd tx_cmd; + struct iwl_tx_cmd_v6 tx_cmd; u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ @@ -131,7 +131,7 @@ struct iwl_tdls_config_cmd { struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT]; __le32 pti_req_data_offset; - struct iwl_tx_cmd pti_req_tx_cmd; + struct iwl_tx_cmd_v6 pti_req_tx_cmd; u8 pti_req_template[]; } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 0a39e4b6eb62..557832563f89 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -151,7 +151,7 @@ enum iwl_tx_cmd_sec_ctrl { #define IWL_LOW_RETRY_LIMIT 7 /** - * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values + * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd_v6 offload_assist values * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words) * from mac header end. For normal case it is 4 words for SNAP. * note: tx_cmd, mac header and pad are not counted in the offset. 
@@ -181,7 +181,7 @@ enum iwl_tx_offload_assist_flags_pos { /* TODO: complete documentation for try_cnt and btkill_cnt */ /** - * struct iwl_tx_cmd - TX command struct to FW + * struct iwl_tx_cmd_v6 - TX command struct to FW * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration @@ -221,7 +221,7 @@ enum iwl_tx_offload_assist_flags_pos { * After the struct fields the MAC header is placed, plus any padding, * and then the actial payload. */ -struct iwl_tx_cmd { +struct iwl_tx_cmd_v6 { __le16 len; __le16 offload_assist; __le32 tx_flags; @@ -258,7 +258,7 @@ struct iwl_dram_sec_info { } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ /** - * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices + * struct iwl_tx_cmd_v9 - TX command struct to FW for 22000 devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration @@ -268,7 +268,7 @@ struct iwl_dram_sec_info { * cleared. Combination of RATE_MCS_* * @hdr: 802.11 header */ -struct iwl_tx_cmd_gen2 { +struct iwl_tx_cmd_v9 { __le16 len; __le16 offload_assist; __le32 flags; @@ -279,18 +279,18 @@ struct iwl_tx_cmd_gen2 { TX_CMD_API_S_VER_9 */ /** - * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices + * struct iwl_tx_cmd - TX command struct to FW for AX210+ devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @flags: combination of &enum iwl_tx_cmd_flags * @offload_assist: TX offload configuration * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. Combination of RATE_MCS_* + * cleared. Combination of RATE_MCS_*; format depends on version * @reserved: reserved * @hdr: 802.11 header */ -struct iwl_tx_cmd_gen3 { +struct iwl_tx_cmd { __le16 len; __le16 flags; __le32 offload_assist; @@ -298,7 +298,9 @@ struct iwl_tx_cmd_gen3 { __le32 rate_n_flags; u8 reserved[8]; struct ieee80211_hdr hdr[]; -} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */ +} __packed; /* TX_CMD_API_S_VER_10, + * TX_CMD_API_S_VER_11 + */ /* * TX response related data @@ -549,7 +551,7 @@ struct iwl_tx_resp_v3 { * @failure_rts: num of failures due to unsuccessful RTS * @failure_frame: num failures due to no ACK (unused for agg) * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the - * Tx of all the batch. RATE_MCS_* + * Tx of all the batch. RATE_MCS_*; format depends on command version * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. * for agg: RTS + CTS + aggregation tx time + block-ack time. * in usec. @@ -600,8 +602,10 @@ struct iwl_tx_resp { __le16 reserved2; struct agg_tx_status status; } __packed; /* TX_RSP_API_S_VER_6, - TX_RSP_API_S_VER_7, - TX_RSP_API_S_VER_8 */ + * TX_RSP_API_S_VER_7, + * TX_RSP_API_S_VER_8, + * TX_RSP_API_S_VER_9 + */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA @@ -701,7 +705,8 @@ enum iwl_mvm_ba_resp_flags { * @rts_retry_cnt: RTS retry count * @reserved: reserved (for alignment) * @wireless_time: Wireless-media time - * @tx_rate: the rate the aggregation was sent at + * @tx_rate: the rate the aggregation was sent at. Format depends on command + * version. * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements * @tfd: array of TFD queue status updates. 
See &iwl_compressed_ba_tfd @@ -730,7 +735,8 @@ struct iwl_compressed_ba_notif { DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd); }; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4, - COMPRESSED_BA_RES_API_S_VER_5 */ + COMPRESSED_BA_RES_API_S_VER_6, + COMPRESSED_BA_RES_API_S_VER_7 */ /** * struct iwl_mac_beacon_cmd_v6 - beacon template command @@ -742,7 +748,7 @@ struct iwl_compressed_ba_notif { * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v6 { - struct iwl_tx_cmd tx; + struct iwl_tx_cmd_v6 tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; @@ -761,7 +767,7 @@ struct iwl_mac_beacon_cmd_v6 { * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v7 { - struct iwl_tx_cmd tx; + struct iwl_tx_cmd_v6 tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index e70eebf079be..ea739ebe7cb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -187,7 +187,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + - fwrt->trans->trans_cfg->umac_prph_offset, 1); + fwrt->trans->mac_cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, @@ -654,10 +654,10 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, { u32 range_len; - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); - } else if (fwrt->trans->trans_cfg->device_family >= + } else if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); @@ -665,7 +665,7 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); - if (fwrt->trans->trans_cfg->mq_rx_supported) { + if (fwrt->trans->mac_cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } @@ -809,13 +809,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->mac_cfg->base->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ - if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { + if (!fwrt->trans->cfg->dccm_offset || + !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) @@ -838,7 +839,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); - if (fwrt->trans->trans_cfg->device_family == + if (fwrt->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; @@ -876,7 +877,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + - fwrt->trans->cfg->d3_debug_data_length * 2; + fwrt->trans->mac_cfg->base->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ @@ -904,13 +905,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = - cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); + cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev)); dump_info->hw_step = - cpu_to_le32(fwrt->trans->hw_rev_step); + cpu_to_le32(fwrt->trans->info.hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); - strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name, - sizeof(dump_info->dev_human_readable)); + strscpy_pad(dump_info->dev_human_readable, + fwrt->trans->info.name, + sizeof(dump_info->dev_human_readable)); strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable)); dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; @@ -996,7 +998,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, - fwrt->trans->cfg->smem_offset, + fwrt->trans->mac_cfg->base->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, @@ -1005,8 +1007,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { - u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; - size_t data_size = fwrt->trans->cfg->d3_debug_data_length; + u32 addr = fwrt->trans->mac_cfg->base->d3_debug_data_base_addr; + size_t data_size = fwrt->trans->mac_cfg->base->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); @@ -1109,7 +1111,7 @@ static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt, range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = size; - if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; indirect_wr_addr += le32_to_cpu(offset); @@ -1266,7 +1268,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, /* all paged index start from 1 to skip CSS section */ idx++; - if (!fwrt->trans->trans_cfg->gen2) + if (!fwrt->trans->mac_cfg->gen2) return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx); range = range_ptr; @@ -1790,7 +1792,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id, data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id, &addrs->write_ptr); - if 
(fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { u32 wrt_ptr = le32_to_cpu(data->write_ptr); data->write_ptr = cpu_to_le32(wrt_ptr >> 2); @@ -1817,7 +1819,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, - &fwrt->trans->cfg->mon_dram_regs); + &fwrt->trans->mac_cfg->base->mon_dram_regs); } static void * @@ -1830,7 +1832,7 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id); return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, - &fwrt->trans->cfg->mon_smem_regs); + &fwrt->trans->mac_cfg->base->mon_smem_regs); } static void * @@ -1844,7 +1846,7 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, /* no offset calculation later */ IWL_FW_INI_ALLOCATION_ID_DBGC1, mon_dump, - &fwrt->trans->cfg->mon_dbgi_regs); + &fwrt->trans->mac_cfg->base->mon_dbgi_regs); } static void * @@ -1909,7 +1911,7 @@ iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt, static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { - if (fwrt->trans->trans_cfg->gen2) { + if (fwrt->trans->mac_cfg->gen2) { if (fwrt->trans->init_dram.paging_cnt) return fwrt->trans->init_dram.paging_cnt - 1; else @@ -2021,7 +2023,7 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, /* start from 1 to skip CSS section */ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) { size += range_header_len; - if (fwrt->trans->trans_cfg->gen2) + if (fwrt->trans->mac_cfg->gen2) size += fwrt->trans->init_dram.paging[i].size; else size += fwrt->fw_paging_db[i].fw_paging_size; @@ -2375,7 +2377,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_dump_cfg_name *cfg_name; u32 size = sizeof(*tlv) + sizeof(*dump); u32 num_of_cfg_names = 0; - u32 hw_type; + u32 hw_type, is_cdb, is_jacket; list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { size += sizeof(*cfg_name); @@ -2403,33 +2405,24 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); - dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); + dump->hw_step = cpu_to_le32(fwrt->trans->info.hw_rev_step); - /* - * Several HWs all have type == 0x42, so we'll override this value - * according to the detected HW - */ - hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); - if (hw_type == IWL_AX210_HW_TYPE) { - u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); - u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); - u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); - u32 masked_bits = is_jacket | (is_cdb << 1); + hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev); + + is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id); + is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) & + WFPM_OTP_CFG1_IS_JACKET_BIT); + + /* Use bits 12 and 13 to indicate jacket/CDB, respectively */ + hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT; - /* - * The HW type depends on certain bits in this case, so add - * these bits to the HW type. We won't have collisions since we - * add these bits after the highest possible bit in the mask. 
- */ - hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT; - } dump->hw_type = cpu_to_le32(hw_type); dump->rf_id_flavor = - cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); - dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id)); - dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id)); - dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)); + cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->info.hw_rf_id)); + dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->info.hw_rf_id)); + dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->info.hw_rf_id)); + dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)); dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major); dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor); @@ -2731,7 +2724,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < ARRAY_SIZE(fwrt->trans->dbg.active_regions)); - if (trigger->time_point & + if (trigger->apply_policy & cpu_to_le32(IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET)) { size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id, regions_mask, &imr_reg_data, @@ -3291,13 +3284,13 @@ void iwl_fw_error_dump_wk(struct work_struct *work) void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) { - const struct iwl_cfg *cfg = fwrt->trans->cfg; + const struct iwl_mac_cfg *mac_cfg = fwrt->trans->mac_cfg; if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) return; if (!fwrt->dump.d3_debug_data) { - fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, + fwrt->dump.d3_debug_data = kmalloc(mac_cfg->base->d3_debug_data_length, GFP_KERNEL); if (!fwrt->dump.d3_debug_data) { IWL_ERR(fwrt, @@ -3307,15 +3300,15 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) } /* if the buffer holds previous debug data it is overwritten */ - iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, + iwl_trans_read_mem_bytes(fwrt->trans, mac_cfg->base->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, - cfg->d3_debug_data_length); + mac_cfg->base->d3_debug_data_length); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, - cfg->d3_debug_data_base_addr, + mac_cfg->base->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, - cfg->d3_debug_data_length); + mac_cfg->base->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); @@ -3350,7 +3343,7 @@ static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); return; } @@ -3374,7 +3367,7 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, if (!params) return -EIO; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); @@ -3476,7 +3469,7 @@ void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt) GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1)); /* supported starting from 9000 devices */ - if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + if 
(fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000) return; if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1)) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 35e30e5db462..8034c9ecba69 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2019, 2021-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -201,7 +201,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && - fwrt->trans->cfg->d3_debug_data_length && fwrt->ops && + fwrt->trans->mac_cfg->base->d3_debug_data_length && fwrt->ops && fwrt->ops->d3_debug_enable && fwrt->ops->d3_debug_enable(fwrt->ops_ctx) && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); @@ -210,7 +210,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && - !fwrt->trans->trans_cfg->gen2 && + !fwrt->trans->mac_cfg->gen2 && fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index f0c813d675f4..c70f2a20f7d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -311,7 +311,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt, pos += scnprintf(pos, endpos - pos, "FW: %s\n", fwrt->fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", - fwrt->trans->name); + fwrt->trans->info.name); pos += scnprintf(pos, endpos - pos, "Bus: %s\n", fwrt->dev->bus->name); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index c7b261c8ec96..3ec42a4ea801 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -417,10 +417,10 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) struct iwl_trans *trans = fwrt->trans; u32 error, data1; - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { error = UMAG_SB_CPU_2_STATUS; data1 = UMAG_SB_CPU_1_STATUS; - } else if (fwrt->trans->trans_cfg->device_family >= + } else if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { error = SB_CPU_2_STATUS; data1 = SB_CPU_1_STATUS; @@ -439,7 +439,7 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n", iwl_read_umac_prph(trans, data1)); - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) 
IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n", iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG)); } @@ -508,7 +508,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) iwl_fwrt_dump_rcm_error_log(fwrt, 1); iwl_fwrt_dump_iml_error_log(fwrt); iwl_fwrt_dump_fseq_regs(fwrt); - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { pc_data = fwrt->trans->dbg.pc_data; if (!iwl_trans_grab_nic_access(fwrt->trans)) @@ -522,7 +522,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) iwl_trans_release_nic_access(fwrt->trans); } - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH); IWL_ERR(fwrt, "Function Scratch status:\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 3af275133da0..cf41021d59ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2024 Intel Corporation + * Copyright (C) 2014, 2018-2025 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -372,10 +372,7 @@ struct iwl_fw_ini_dump_cfg_name { u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME]; } __packed; -/* AX210's HW type */ -#define IWL_AX210_HW_TYPE 0x42 -/* How many bits to roll when adding to the HW type of AX210 HW */ -#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12 +#define IWL_JACKET_CDB_SHIFT 12 /* struct iwl_fw_ini_dump_info - ini dump information * @version: dump version diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index de87e0e3e072..d1d8058ad29f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021, 2024 Intel Corporation + * Copyright (C) 2019-2021, 2024-2025 Intel Corporation */ #include "iwl-drv.h" #include "runtime.h" @@ -72,7 +72,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) * values in VER_1, this is backwards-compatible with VER_2, * as long as we don't set any other bits. 
*/ - if (!fwrt->trans->trans_cfg->integrated) + if (!fwrt->trans->mac_cfg->integrated) cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE); BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE != @@ -84,17 +84,17 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US != SOC_FLAGS_LTR_APPLY_DELAY_1820); - if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE && - !WARN_ON(!fwrt->trans->trans_cfg->integrated)) - cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay, + if (fwrt->trans->mac_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE && + !WARN_ON(!fwrt->trans->mac_cfg->integrated)) + cmd.flags |= le32_encode_bits(fwrt->trans->mac_cfg->ltr_delay, SOC_FLAGS_LTR_APPLY_DELAY_MASK); if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN) >= 2 && - fwrt->trans->trans_cfg->low_latency_xtal) + fwrt->trans->mac_cfg->low_latency_xtal) cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY); - cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency); + cmd.latency = cpu_to_le32(fwrt->trans->mac_cfg->xtal_latency); ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) @@ -116,14 +116,14 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) * The default queue is configured via context info, so if we * have a single queue, there's nothing to do here. */ - if (fwrt->trans->num_rx_queues == 1) + if (fwrt->trans->info.num_rxqs == 1) return 0; - if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_22000) return 0; /* skip the default queue */ - num_queues = fwrt->trans->num_rx_queues - 1; + num_queues = fwrt->trans->info.num_rxqs - 1; size = struct_size(cmd, data, num_queues); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index a7b7cae874a2..826409f6f710 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -267,7 +267,7 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) const struct fw_img *fw = &fwrt->fw->img[type]; int ret; - if (fwrt->trans->trans_cfg->gen2) + if (fwrt->trans->mac_cfg->gen2) return 0; /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index 1195e708caa9..4f3c2f7f4f5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2020-2024 Intel Corporation + * Copyright(c) 2020-2025 Intel Corporation */ #include "iwl-drv.h" @@ -96,8 +96,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", mac_type, rf_id); - if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) && - rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id)) + if (mac_type == CSR_HW_REV_TYPE(trans->info.hw_rev) && + rf_id == CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) hw_match = true; break; case IWL_UCODE_TLV_SEC_RT: { @@ -152,8 +152,8 @@ done: if (!hw_match) { IWL_DEBUG_FW(trans, "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 
0x%x)\n", - CSR_HW_REV_TYPE(trans->hw_rev), - CSR_HW_RFID_TYPE(trans->hw_rf_id)); + CSR_HW_REV_TYPE(trans->info.hw_rev), + CSR_HW_RFID_TYPE(trans->info.hw_rf_id)); return -ENOENT; } @@ -167,7 +167,8 @@ done: static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, size_t len, - struct iwl_pnvm_image *pnvm_data) + struct iwl_pnvm_image *pnvm_data, + __le32 sku_id[3]) { const struct iwl_ucode_tlv *tlv; @@ -190,23 +191,23 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { - const struct iwl_sku_id *sku_id = + const struct iwl_sku_id *tlv_sku_id = (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", tlv_len); IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n", - le32_to_cpu(sku_id->data[0]), - le32_to_cpu(sku_id->data[1]), - le32_to_cpu(sku_id->data[2])); + le32_to_cpu(tlv_sku_id->data[0]), + le32_to_cpu(tlv_sku_id->data[1]), + le32_to_cpu(tlv_sku_id->data[2])); data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); trans->reduced_cap_sku = false; - rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id); - if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) && + rf_type = CSR_HW_RFID_TYPE(trans->info.hw_rf_id); + if ((sku_id[0] & cpu_to_le32(IWL_PNVM_REDUCED_CAP_BIT)) && rf_type == IWL_CFG_RF_TYPE_FM) trans->reduced_cap_sku = true; @@ -214,9 +215,9 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, "Reduced SKU device %d\n", trans->reduced_cap_sku); - if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && - trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && - trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { + if (sku_id[0] == tlv_sku_id->data[0] && + sku_id[1] == tlv_sku_id->data[1] && + sku_id[2] == tlv_sku_id->data[2]) { int ret; ret = iwl_pnvm_handle_section(trans, data, len, @@ -263,13 +264,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) return 0; } -static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len) +static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len, + __le32 sku_id[3]) { struct pnvm_sku_package *package; u8 *image = NULL; /* Get PNVM from BIOS for non-Intel SKU */ - if (trans_p->sku_id[2]) { + if (sku_id[2]) { package = iwl_uefi_get_pnvm(trans_p, len); if (!IS_ERR_OR_NULL(package)) { if (*len >= sizeof(*package)) { @@ -294,8 +296,10 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len) return image; } -static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) +static void +iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa, + __le32 sku_id[3]) { struct iwl_pnvm_image *pnvm_data = NULL; u8 *data = NULL; @@ -309,7 +313,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, if (trans->pnvm_loaded) goto set; - data = iwl_get_pnvm_image(trans, &length); + data = iwl_get_pnvm_image(trans, &length, sku_id); if (!data) { trans->fail_to_parse_pnvm_image = true; return; @@ -319,7 +323,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, if (!pnvm_data) goto free; - ret = iwl_pnvm_parse(trans, data, length, pnvm_data); + ret = iwl_pnvm_parse(trans, data, length, pnvm_data, sku_id); if (ret) { trans->fail_to_parse_pnvm_image = true; goto free; @@ -339,7 +343,8 @@ free: static void iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) + const struct iwl_ucode_capabilities *capa, + __le32 
sku_id[3]) { struct iwl_pnvm_image *pnvm_data = NULL; u8 *data = NULL; @@ -362,7 +367,8 @@ iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans, if (!pnvm_data) goto free; - ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data); + ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data, + sku_id); if (ret) { trans->failed_to_load_reduce_power_image = true; goto free; @@ -386,18 +392,19 @@ free: int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait, - const struct iwl_ucode_capabilities *capa) + const struct iwl_ucode_capabilities *capa, + __le32 sku_id[3]) { struct iwl_notification_wait pnvm_wait; static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, PNVM_INIT_COMPLETE_NTFY) }; /* if the SKU_ID is empty, there's nothing to do */ - if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) + if (!sku_id[0] && !sku_id[1] && !sku_id[2]) return 0; - iwl_pnvm_load_pnvm_to_trans(trans, capa); - iwl_pnvm_load_reduce_power_to_trans(trans, capa); + iwl_pnvm_load_pnvm_to_trans(trans, capa, sku_id); + iwl_pnvm_load_reduce_power_to_trans(trans, capa, sku_id); iwl_init_notification_wait(notif_wait, &pnvm_wait, ntf_cmds, ARRAY_SIZE(ntf_cmds), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h index 1bac3466154c..9540926e8a0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2020-2023 Intel Corporation + * Copyright(c) 2020-2023, 2025 Intel Corporation */ #ifndef __IWL_PNVM_H__ #define __IWL_PNVM_H__ @@ -14,7 +14,8 @@ int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait, - const struct iwl_ucode_capabilities *capa); + const struct iwl_ucode_capabilities *capa, + __le32 sku_id[3]); static inline void iwl_pnvm_get_fs_name(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index 6adcfa6e214a..74b90bd92c48 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2023 Intel Corporation + * Copyright (C) 2023, 2025 Intel Corporation */ #include <linux/dmi.h> #include "iwl-drv.h" @@ -34,6 +34,7 @@ IWL_BIOS_TABLE_LOADER(wrds_table); IWL_BIOS_TABLE_LOADER(ewrd_table); IWL_BIOS_TABLE_LOADER(wgds_table); IWL_BIOS_TABLE_LOADER(ppag_table); +IWL_BIOS_TABLE_LOADER(phy_filters); IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data); IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64); IWL_BIOS_TABLE_LOADER_DATA(mcc, char); @@ -180,9 +181,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) */ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && - fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || + fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) || (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && - ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == + ((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); @@ -313,7 +314,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, bool send_ppag_always; /* many firmware images for JF lie about this */ - if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == + if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) == 
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) return -EOPNOTSUPP; @@ -338,31 +339,35 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, return -EINVAL; } - /* The 'flags' field is the same in v1 and in v2 so we can just - * use v1 to access it. - */ - cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); - IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); if (cmd_ver == 1) { num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = cmd->v1.gain[0]; *cmd_size = sizeof(cmd->v1); - if (fwrt->ppag_ver >= 1) { + cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); + if (fwrt->ppag_bios_rev >= 1) { /* in this case FW supports revision 0 */ IWL_DEBUG_RADIO(fwrt, "PPAG table rev is %d, send truncated table\n", - fwrt->ppag_ver); + fwrt->ppag_bios_rev); } } else if (cmd_ver >= 2 && cmd_ver <= 6) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = cmd->v2.gain[0]; *cmd_size = sizeof(cmd->v2); - if (fwrt->ppag_ver == 0) { + cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags); + if (fwrt->ppag_bios_rev == 0) { /* in this case FW supports revisions 1,2 or 3 */ IWL_DEBUG_RADIO(fwrt, "PPAG table rev is 0, send padded table\n"); } + } else if (cmd_ver == 7) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = cmd->v3.gain[0]; + *cmd_size = sizeof(cmd->v3); + cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source; + cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev; + cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags); } else { IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); return -EINVAL; @@ -371,9 +376,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, /* ppag mode */ IWL_DEBUG_RADIO(fwrt, "PPAG MODE bits were read from bios: %d\n", - le32_to_cpu(cmd->v1.flags)); + fwrt->ppag_flags); - if (cmd_ver == 5) + if (cmd_ver == 6) + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK); + else if (cmd_ver == 5) cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK); else if (cmd_ver < 5) cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK); @@ -381,16 +388,20 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) || - (cmd_ver == 2 && fwrt->ppag_ver >= 2)) { + (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) { cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); } else { IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); } + /* The 'flags' field is the same in v1 and v2 so we can just + * use v1 to access it. + */ IWL_DEBUG_RADIO(fwrt, "PPAG MODE bits going to be sent: %d\n", - le32_to_cpu(cmd->v1.flags)); + (cmd_ver < 7) ? 
le32_to_cpu(cmd->v1.flags) : + le32_to_cpu(cmd->v3.ppag_config_info.value)); for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { @@ -480,7 +491,7 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) u32 val; __le32 config_bitmap = 0; - switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) { case IWL_CFG_RF_TYPE_HR1: case IWL_CFG_RF_TYPE_HR2: case IWL_CFG_RF_TYPE_JF1: diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index 53693314d505..9bed3d573b1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2023-2024 Intel Corporation + * Copyright (C) 2023-2025 Intel Corporation */ #ifndef __fw_regulatory_h__ @@ -224,10 +224,14 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value); static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes, - const u8 ppag_ver) + const u8 ppag_bios_rev) { - return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK : - IWL_PPAG_REV3_MASK); + /* For revision 4 and above driver is pipe */ + if (ppag_bios_rev >= 4) + return ppag_modes; + + return ppag_modes & (ppag_bios_rev < 3 ? IWL_PPAG_ETSI_CHINA_MASK : + IWL_PPAG_REV3_MASK); } bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc); @@ -236,22 +240,25 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc); #define IWL_DSBR_PERMANENT_URM_MASK BIT(9) int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); +int iwl_bios_get_phy_filters(struct iwl_fw_runtime *fwrt); static inline void iwl_bios_setup_step(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { u32 dsbr; - if (!trans->trans_cfg->integrated) + if (!trans->mac_cfg->integrated) return; - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ) return; if (iwl_bios_get_dsbr(fwrt, &dsbr)) dsbr = 0; - trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK); - trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK); + trans->conf.dsbr_urm_fw_dependent = + !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK); + trans->conf.dsbr_urm_permanent = + !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK); } #endif /* __fw_regulatory_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c index 8f99e501629e..746f2acffb8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2021-2022 Intel Corporation + * Copyright (C) 2021-2022, 2025 Intel Corporation */ #include <net/mac80211.h> @@ -91,104 +91,6 @@ const char *iwl_rs_pretty_bw(int bw) } IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); -static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) -{ - int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; - int idx; - bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); - int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; - int last = ofdm ? 
IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; - - for (idx = offset; idx < last; idx++) - if (iwl_fw_rate_idx_to_plcp(idx) == rate) - return idx - offset; - return IWL_RATE_INVALID; -} - -u32 iwl_new_rate_from_v1(u32 rate_v1) -{ - u32 rate_v2 = 0; - u32 dup = 0; - - if (rate_v1 == 0) - return rate_v1; - /* convert rate */ - if (rate_v1 & RATE_MCS_HT_MSK_V1) { - u32 nss = 0; - - rate_v2 |= RATE_MCS_HT_MSK; - rate_v2 |= - rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; - nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >> - RATE_HT_MCS_NSS_POS_V1; - rate_v2 |= nss << RATE_MCS_NSS_POS; - } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || - rate_v1 & RATE_MCS_HE_MSK_V1) { - rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; - - rate_v2 |= rate_v1 & RATE_MCS_NSS_MSK; - - if (rate_v1 & RATE_MCS_HE_MSK_V1) { - u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; - u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; - u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> - RATE_MCS_HE_106T_POS_V1; - u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> - RATE_MCS_HE_GI_LTF_POS; - - if ((he_type_bits == RATE_MCS_HE_TYPE_SU || - he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && - he_gi_ltf == RATE_MCS_HE_SU_4_LTF) - /* the new rate have an additional bit to - * represent the value 4 rather then using SGI - * bit for this purpose - as it was done in the old - * rate */ - he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> - RATE_MCS_SGI_POS_V1; - - rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; - rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS; - rate_v2 |= he_106t << RATE_MCS_HE_106T_POS; - rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; - rate_v2 |= RATE_MCS_HE_MSK; - } else { - rate_v2 |= RATE_MCS_VHT_MSK; - } - /* if legacy format */ - } else { - u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); - - if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) - legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? - IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; - - rate_v2 |= legacy_rate; - if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) - rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; - } - - /* convert flags */ - if (rate_v1 & RATE_MCS_LDPC_MSK_V1) - rate_v2 |= RATE_MCS_LDPC_MSK; - rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | - (rate_v1 & RATE_MCS_ANT_AB_MSK) | - (rate_v1 & RATE_MCS_STBC_MSK) | - (rate_v1 & RATE_MCS_BF_MSK); - - dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; - if (dup) { - rate_v2 |= RATE_MCS_DUP_MSK; - rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS; - } - - if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && - (rate_v1 & RATE_MCS_SGI_MSK_V1)) - rate_v2 |= RATE_MCS_SGI_MSK; - - return rate_v2; -} -IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); - int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type; @@ -197,37 +99,40 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS; u32 format = rate & RATE_MCS_MOD_TYPE_MSK; + int index = 0; bool sgi; - if (format == RATE_MCS_CCK_MSK || - format == RATE_MCS_LEGACY_OFDM_MSK) { - int legacy_rate = rate & RATE_LEGACY_RATE_MSK; - int index = format == RATE_MCS_CCK_MSK ? 
- legacy_rate : - legacy_rate + IWL_FIRST_OFDM_RATE; + switch (format) { + case RATE_MCS_MOD_TYPE_LEGACY_OFDM: + index = IWL_FIRST_OFDM_RATE; + fallthrough; + case RATE_MCS_MOD_TYPE_CCK: + index += rate & RATE_LEGACY_RATE_MSK; return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", iwl_rs_pretty_ant(ant), iwl_rate_mcs(index)->mbps); - } - - if (format == RATE_MCS_VHT_MSK) + case RATE_MCS_MOD_TYPE_VHT: type = "VHT"; - else if (format == RATE_MCS_HT_MSK) + break; + case RATE_MCS_MOD_TYPE_HT: type = "HT"; - else if (format == RATE_MCS_HE_MSK) + break; + case RATE_MCS_MOD_TYPE_HE: type = "HE"; - else if (format == RATE_MCS_EHT_MSK) + break; + case RATE_MCS_MOD_TYPE_EHT: type = "EHT"; - else + break; + default: type = "Unknown"; /* shouldn't happen */ + } - mcs = format == RATE_MCS_HT_MSK ? + mcs = format == RATE_MCS_MOD_TYPE_HT ? RATE_HT_MCS_INDEX(rate) : rate & RATE_MCS_CODE_MSK; - nss = ((rate & RATE_MCS_NSS_MSK) - >> RATE_MCS_NSS_POS) + 1; - sgi = format == RATE_MCS_HE_MSK ? + nss = u32_get_bits(rate, RATE_MCS_NSS_MSK); + sgi = format == RATE_MCS_MOD_TYPE_HE ? iwl_he_is_sgi(rate) : rate & RATE_MCS_SGI_MSK; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 5753d95986cd..0444a736c2b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -111,6 +111,8 @@ struct iwl_txf_iter_data { * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables * @geo_profiles: geographic profiles as read from WGDS BIOS table * @phy_filters: specific phy filters as read from WPFC BIOS table + * @ppag_bios_rev: PPAG BIOS revision + * @ppag_bios_source: see &enum bios_source */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -179,15 +181,14 @@ struct iwl_fw_runtime { bool geo_enabled; struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; u32 ppag_flags; - u8 ppag_ver; + u8 ppag_bios_rev; + u8 ppag_bios_source; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; struct iwl_mcc_allowed_ap_type_cmd uats_table; bool uats_valid; u8 uefi_tables_lock_status; -#ifdef CONFIG_ACPI struct iwl_phy_specific_cfg phy_filters; -#endif }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 3f1272014daf..90fd69b4860c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -102,7 +102,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) } pkt = cmd.resp_pkt; - if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 386aadbce2a2..48126ec6b94b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -219,7 +219,8 @@ done: int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, const u8 *data, size_t len, - struct iwl_pnvm_image *pnvm_data) + struct 
iwl_pnvm_image *pnvm_data, + __le32 sku_id[3]) { const struct iwl_ucode_tlv *tlv; @@ -241,23 +242,23 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { - const struct iwl_sku_id *sku_id = + const struct iwl_sku_id *tlv_sku_id = (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", tlv_len); IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n", - le32_to_cpu(sku_id->data[0]), - le32_to_cpu(sku_id->data[1]), - le32_to_cpu(sku_id->data[2])); + le32_to_cpu(tlv_sku_id->data[0]), + le32_to_cpu(tlv_sku_id->data[1]), + le32_to_cpu(tlv_sku_id->data[2])); data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); - if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && - trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && - trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { + if (sku_id[0] == tlv_sku_id->data[0] && + sku_id[1] == tlv_sku_id->data[1] && + sku_id[2] == tlv_sku_id->data[2]) { int ret = iwl_uefi_reduce_power_section(trans, data, len, pnvm_data); @@ -310,11 +311,12 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat if (common_step_data->revision != 1) return -EINVAL; - trans->mbx_addr_0_step = (u32)common_step_data->revision | + trans->conf.mbx_addr_0_step = + (u32)common_step_data->revision | (u32)common_step_data->cnvi_eq_channel << 8 | (u32)common_step_data->cnvr_eq_channel << 16 | (u32)common_step_data->radio1 << 24; - trans->mbx_addr_1_step = (u32)common_step_data->radio2; + trans->conf.mbx_addr_1_step = (u32)common_step_data->radio2; return 0; } @@ -323,7 +325,7 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans) struct uefi_cnv_common_step_data *data; int ret; - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return; data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID, @@ -408,8 +410,8 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data, return 0; } -int iwl_uefi_get_uats_table(struct iwl_trans *trans, - struct iwl_fw_runtime *fwrt) +void iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) { struct uefi_cnv_wlan_uats_data *data; int ret; @@ -417,17 +419,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans, data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME, "UATS", sizeof(*data), NULL); if (IS_ERR(data)) - return -EINVAL; + return; ret = iwl_uefi_uats_parse(data, fwrt); - if (ret < 0) { + if (ret < 0) IWL_DEBUG_FW(trans, "Cannot read UATS table. 
rev is invalid\n"); - kfree(data); - return ret; - } - kfree(data); - return 0; } IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table); @@ -557,13 +554,14 @@ int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt) goto out; } - fwrt->ppag_ver = data->revision; + fwrt->ppag_bios_rev = data->revision; fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes, - fwrt->ppag_ver); + fwrt->ppag_bios_rev); BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains)); memcpy(&fwrt->ppag_chains, &data->ppag_chains, sizeof(data->ppag_chains)); + fwrt->ppag_bios_source = BIOS_SOURCE_UEFI; out: kfree(data); return ret; @@ -750,6 +748,10 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, } *value = data->functions[func]; + + IWL_DEBUG_RADIO(fwrt, + "UEFI: DSM func=%d: value=%d\n", func, *value); + ret = 0; out: kfree(data); @@ -810,3 +812,31 @@ out: kfree(data); return ret; } + +int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt) +{ + struct uefi_cnv_wpfc_data *data __free(kfree); + struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WPFC_NAME, + "WPFC", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != 0) { + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WPFC revision:%d\n", + data->revision); + return -EINVAL; + } + + BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != + ARRAY_SIZE(data->chains)); + + for (int i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { + filters->filter_cfg_chains[i] = cpu_to_le32(data->chains[i]); + IWL_DEBUG_RADIO(fwrt, "WPFC: chain %d: %u\n", i, data->chains[i]); + } + + IWL_DEBUG_RADIO(fwrt, "Loaded WPFC config from UEFI\n"); + return 0; +} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index eb3c05417da3..5a4c557e47c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -24,6 +24,7 @@ #define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" #define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" #define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR" +#define IWL_UEFI_WPFC_NAME L"WPFC" #define IWL_SGOM_MAP_SIZE 339 @@ -33,7 +34,7 @@ #define IWL_UEFI_EWRD_REVISION 2 #define IWL_UEFI_WGDS_REVISION 3 #define IWL_UEFI_MIN_PPAG_REV 1 -#define IWL_UEFI_MAX_PPAG_REV 3 +#define IWL_UEFI_MAX_PPAG_REV 4 #define IWL_UEFI_MIN_WTAS_REVISION 1 #define IWL_UEFI_MAX_WTAS_REVISION 2 #define IWL_UEFI_SPLC_REVISION 0 @@ -230,6 +231,18 @@ struct uefi_cnv_wlan_dsbr_data { u32 config; } __packed; +/** + * struct uefi_cnv_wpfc_data - BIOS Wi-Fi PHY filter Configuration + * @revision: the revision of the table + * @chains: configuration of each of the chains (a-d) + * + * specific PHY filter configuration + */ +struct uefi_cnv_wpfc_data { + u8 revision; + u32 chains[4]; +} __packed; + /* * This is known to be broken on v4.19 and to work on v5.4. 
Until we * figure out why this is the case and how to make it work, simply @@ -240,7 +253,8 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len); u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, const u8 *data, size_t len, - struct iwl_pnvm_image *pnvm_data); + struct iwl_pnvm_image *pnvm_data, + __le32 sku_id[3]); void iwl_uefi_get_step_table(struct iwl_trans *trans); int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, u32 tlv_len, struct iwl_pnvm_image *pnvm_data); @@ -258,10 +272,11 @@ int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value); void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); -int iwl_uefi_get_uats_table(struct iwl_trans *trans, - struct iwl_fw_runtime *fwrt); +void iwl_uefi_get_uats_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt); int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt); int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value); +int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt); #else /* CONFIG_EFI */ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { @@ -271,7 +286,8 @@ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) static inline int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, const u8 *data, size_t len, - struct iwl_pnvm_image *pnvm_data) + struct iwl_pnvm_image *pnvm_data, + __le32 sku_id[3]) { return -EOPNOTSUPP; } @@ -352,11 +368,9 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwr { } -static inline -int iwl_uefi_get_uats_table(struct iwl_trans *trans, - struct iwl_fw_runtime *fwrt) +static inline void +iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { - return 0; } static inline @@ -370,5 +384,10 @@ int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value) { return -ENOENT; } + +static inline int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt) +{ + return -ENOENT; +} #endif /* CONFIG_EFI */ #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index acafee538b8a..91f22ce36d74 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -115,7 +115,29 @@ static inline u8 num_of_ant(u8 mask) } /** - * struct iwl_base_params - params not likely to change within a device family + * struct iwl_fw_mon_reg - FW monitor register info + * @addr: register address + * @mask: register mask + */ +struct iwl_fw_mon_reg { + u32 addr; + u32 mask; +}; + +/** + * struct iwl_fw_mon_regs - FW monitor registers + * @write_ptr: write pointer register + * @cycle_cnt: cycle count register + * @cur_frag: current fragment in use + */ +struct iwl_fw_mon_regs { + struct iwl_fw_mon_reg write_ptr; + struct iwl_fw_mon_reg cycle_cnt; + struct iwl_fw_mon_reg cur_frag; +}; + +/** + * struct iwl_family_base_params - base parameters for an entire family * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate 
on the led on/off time per HW according @@ -124,12 +146,34 @@ static inline u8 num_of_ant(u8 mask) * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support + * @apmg_not_supported: there's no APMG * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. * @max_tfd_queue_size: max number of entries in tfd queue. + * @eeprom_size: EEPROM size + * @num_of_queues: number of HW TX queues supported + * @pcie_l1_allowed: PCIe L1 state is allowed + * @pll_cfg: PLL configuration needed + * @nvm_hw_section_num: the ID of the HW NVM section + * @features: hw features, any combination of feature_passlist + * @smem_offset: offset from which the SMEM begins + * @smem_len: the length of SMEM + * @mac_addr_from_csr: read HW address from CSR registers at this offset + * @d3_debug_data_base_addr: base address where D3 debug data is stored + * @d3_debug_data_length: length of the D3 debug data + * @min_ba_txq_size: minimum number of slots required in a TX queue used + * for aggregation + * @min_txq_size: minimum number of slots required in a TX queue + * @gp2_reg_addr: GP2 (timer) register address + * @min_umac_error_event_table: minimum SMEM location of UMAC error table + * @mon_dbgi_regs: monitor DBGI registers + * @mon_dram_regs: monitor DRAM registers + * @mon_smem_regs: monitor SMEM registers + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. */ -struct iwl_base_params { +struct iwl_family_base_params { unsigned int wd_timeout; u16 eeprom_size; @@ -140,6 +184,7 @@ struct iwl_base_params { shadow_reg_enable:1, pcie_l1_allowed:1, apmg_wake_up_wa:1, + apmg_not_supported:1, scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ @@ -147,6 +192,22 @@ struct iwl_base_params { u8 max_ll_items; u8 led_compensation; + u8 ucode_api_max; + u8 ucode_api_min; + u32 mac_addr_from_csr:10; + u8 nvm_hw_section_num; + netdev_features_t features; + u32 smem_offset; + u32 smem_len; + u32 min_umac_error_event_table; + u32 d3_debug_data_base_addr; + u32 d3_debug_data_length; + u32 min_txq_size; + u32 gp2_reg_addr; + u32 min_ba_txq_size; + const struct iwl_fw_mon_regs mon_dram_regs; + const struct iwl_fw_mon_regs mon_smem_regs; + const struct iwl_fw_mon_regs mon_dbgi_regs; }; /* @@ -238,7 +299,7 @@ struct iwl_pwr_tx_backoff { u32 backoff; }; -enum iwl_cfg_trans_ltr_delay { +enum iwl_mac_cfg_ltr_delay { IWL_CFG_TRANS_LTR_DELAY_NONE = 0, IWL_CFG_TRANS_LTR_DELAY_200US = 1, IWL_CFG_TRANS_LTR_DELAY_2500US = 2, @@ -246,35 +307,33 @@ enum iwl_cfg_trans_ltr_delay { }; /** - * struct iwl_cfg_trans_params - information needed to start the trans + * struct iwl_mac_cfg - information about the MAC-specific device part * * These values are specific to the device ID and do not change when * multiple configs are used for a single device ID. They values are * used, among other things, to boot the NIC so that the HW REV or * RFID can be read before deciding the remaining parameters to use. 
* - * @base_params: pointer to basic parameters + * @base: pointer to basic parameters * @device_family: the device family * @umac_prph_offset: offset to add to UMAC periphery address * @xtal_latency: power up latency to get the xtal stabilized * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY - * @rf_id: need to read rf_id to determine the firmware image * @gen2: 22000 and on transport operation * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated * @low_latency_xtal: use the low latency xtal if supported * @bisr_workaround: BISR hardware workaround (for 22260 series devices) - * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay. + * @ltr_delay: LTR delay parameter, &enum iwl_mac_cfg_ltr_delay. * @imr_enabled: use the IMR if supported. */ -struct iwl_cfg_trans_params { - const struct iwl_base_params *base_params; +struct iwl_mac_cfg { + const struct iwl_family_base_params *base; enum iwl_device_family device_family; u32 umac_prph_offset; u32 xtal_latency; u32 extra_phy_cfg_flags; - u32 rf_id:1, - gen2:1, + u32 gen2:1, mq_rx_supported:1, integrated:1, low_latency_xtal:1, @@ -283,36 +342,21 @@ struct iwl_cfg_trans_params { imr_enabled:1; }; -/** - * struct iwl_fw_mon_reg - FW monitor register info - * @addr: register address - * @mask: register mask - */ -struct iwl_fw_mon_reg { - u32 addr; - u32 mask; -}; - -/** - * struct iwl_fw_mon_regs - FW monitor registers - * @write_ptr: write pointer register - * @cycle_cnt: cycle count register - * @cur_frag: current fragment in use +/* + * These sizes were picked according to 8 MSDUs inside 64/256/612 A-MSDUs + * in an A-MPDU, with additional overhead to account for processing time. + * They will be doubled for MACs starting from So/Ty that don't support + * putting multiple frames into a single buffer. */ -struct iwl_fw_mon_regs { - struct iwl_fw_mon_reg write_ptr; - struct iwl_fw_mon_reg cycle_cnt; - struct iwl_fw_mon_reg cur_frag; -}; +#define IWL_NUM_RBDS_NON_HE (64 * 8) +#define IWL_NUM_RBDS_HE (256 * 8) +#define IWL_NUM_RBDS_EHT (512 * 8) /** - * struct iwl_cfg - * @trans: the trans-specific configuration part - * @name: Official name of the device + * struct iwl_rf_cfg * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as <fw_name_pre>-<api>.ucode. - * @fw_name_mac: MAC name for this config, the remaining pieces of the * name will be generated dynamically * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. @@ -323,34 +367,25 @@ struct iwl_fw_mon_regs { * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version + * @bw_limit: bandwidth limit for this device, if non-zero * @ht_params: point to ht parameters + * @eeprom_params: EEPROM parameters (old devices) + * @thermal_params: Thermal throttling parameters + * @lp_xtal_workaround: low-power crystal workaround needed * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @tx_with_siso_diversity: 1x1 device with tx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device - * @high_temp: Is this NIC is designated to be in high temperature. 
* @host_interrupt_operation_mode: device needs host interrupt operation * mode set - * @nvm_hw_section_num: the ID of the HW NVM section - * @mac_addr_from_csr: read HW address from CSR registers at this offset - * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs - * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @dccm_offset: offset from which DCCM begins * @dccm_len: length of DCCM (including runtime stack CCM) * @dccm2_offset: offset from which the second DCCM begins * @dccm2_len: length of the second DCCM - * @smem_offset: offset from which the SMEM begins - * @smem_len: the length of SMEM * @vht_mu_mimo_supported: VHT MU-MIMO support - * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type - * @d3_debug_data_base_addr: base address where D3 debug data is stored - * @d3_debug_data_length: length of the D3 debug data - * @min_txq_size: minimum number of slots required in a TX queue * @uhb_supported: ultra high band channels supported - * @min_ba_txq_size: minimum number of slots required in a TX queue which - * based on hardware support (HE - 256, EHT - 1K). * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) * @@ -358,60 +393,38 @@ struct iwl_fw_mon_regs { * API differences in uCode shouldn't be handled here but through TLVs * and/or the uCode API version instead. */ -struct iwl_cfg { - struct iwl_cfg_trans_params trans; +struct iwl_rf_cfg { /* params specific to an individual device within a device family */ - const char *name; const char *fw_name_pre; - const char *fw_name_mac; /* params likely to change within a device family */ - const struct iwl_ht_params *ht_params; + const struct iwl_ht_params ht_params; const struct iwl_eeprom_params *eeprom_params; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; - const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; u32 max_data_size; u32 max_inst_size; - netdev_features_t features; u32 dccm_offset; u32 dccm_len; u32 dccm2_offset; u32 dccm2_len; - u32 smem_offset; - u32 smem_len; u16 nvm_ver; u16 nvm_calib_ver; + u16 bw_limit; u32 rx_with_siso_diversity:1, tx_with_siso_diversity:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, - high_temp:1, - mac_addr_from_csr:10, lp_xtal_workaround:1, - apmg_not_supported:1, vht_mu_mimo_supported:1, - cdb:1, - dbgc_supported:1, uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; - u8 nvm_hw_section_num; - u8 max_tx_agg_size; u8 ucode_api_max; u8 ucode_api_min; u16 num_rbds; - u32 min_umac_error_event_table; - u32 d3_debug_data_base_addr; - u32 d3_debug_data_length; - u32 min_txq_size; - u32 gp2_reg_addr; - u32 min_ba_txq_size; - const struct iwl_fw_mon_regs mon_dram_regs; - const struct iwl_fw_mon_regs mon_smem_regs; - const struct iwl_fw_mon_regs mon_dbgi_regs; }; #define IWL_CFG_ANY (~0) @@ -419,8 +432,10 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_PU 0x31 #define IWL_CFG_MAC_TYPE_TH 0x32 #define IWL_CFG_MAC_TYPE_QU 0x33 +#define IWL_CFG_MAC_TYPE_CC 0x34 #define IWL_CFG_MAC_TYPE_QUZ 0x35 #define IWL_CFG_MAC_TYPE_SO 0x37 +#define IWL_CFG_MAC_TYPE_TY 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_MAC_TYPE_BZ 0x46 @@ -432,8 +447,6 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_BR 0x4C #define IWL_CFG_MAC_TYPE_DR 0x4D -#define IWL_CFG_RF_TYPE_TH 0x105 -#define IWL_CFG_RF_TYPE_TH1 0x108 #define 
IWL_CFG_RF_TYPE_JF2 0x105 #define IWL_CFG_RF_TYPE_JF1 0x108 #define IWL_CFG_RF_TYPE_HR2 0x10A @@ -451,12 +464,6 @@ struct iwl_cfg { #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 -#define IWL_CFG_NO_160 0x1 -#define IWL_CFG_160 0x0 - -#define IWL_CFG_NO_320 0x1 -#define IWL_CFG_320 0x0 - #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 @@ -467,55 +474,133 @@ struct iwl_cfg { #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) -#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) +#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { + const struct iwl_rf_cfg *cfg; + const char *name; u16 device; u16 subdevice; - u16 mac_type; - u16 rf_type; - u8 mac_step; - u8 rf_step; - u8 rf_id; - u8 no_160; - u8 cores; - u8 cdb; - u8 jacket; - const struct iwl_cfg *cfg; - const char *name; + u32 subdevice_m_l:4, + subdevice_m_h:4, + match_rf_type:1, + rf_type:9, + match_bw_limit:1, + bw_limit:1, + match_rf_step:1, + rf_step:4, + match_rf_id:1, + rf_id:4, + match_cdb:1, + cdb:1; }; #if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) extern const struct iwl_dev_info iwl_dev_info_table[]; extern const unsigned int iwl_dev_info_table_size; const struct iwl_dev_info * -iwl_pci_find_dev_info(u16 device, u16 subsystem_device, - u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); +iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb, + u8 rf_id, u8 bw_limit, u8 rf_step); extern const struct pci_device_id iwl_hw_card_ids[]; #endif /* * This list declares the config structures for all devices. 
*/ -extern const struct iwl_cfg_trans_params iwl9000_trans_cfg; -extern const struct iwl_cfg_trans_params iwl9560_trans_cfg; -extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg; -extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_br_trans_cfg; +extern const struct iwl_mac_cfg iwl1000_mac_cfg; +extern const struct iwl_mac_cfg iwl5000_mac_cfg; +extern const struct iwl_mac_cfg iwl2000_mac_cfg; +extern const struct iwl_mac_cfg iwl2030_mac_cfg; +extern const struct iwl_mac_cfg iwl105_mac_cfg; +extern const struct iwl_mac_cfg iwl135_mac_cfg; +extern const struct iwl_mac_cfg iwl5150_mac_cfg; +extern const struct iwl_mac_cfg iwl6005_mac_cfg; +extern const struct iwl_mac_cfg iwl6030_mac_cfg; +extern const struct iwl_mac_cfg iwl6000i_mac_cfg; +extern const struct iwl_mac_cfg iwl6050_mac_cfg; +extern const struct iwl_mac_cfg iwl6150_mac_cfg; +extern const struct iwl_mac_cfg iwl6000_mac_cfg; +extern const struct iwl_mac_cfg iwl7000_mac_cfg; +extern const struct iwl_mac_cfg iwl8000_mac_cfg; +extern const struct iwl_mac_cfg iwl9000_mac_cfg; +extern const struct iwl_mac_cfg iwl9560_mac_cfg; +extern const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg; +extern const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg; +extern const struct iwl_mac_cfg iwl_qu_mac_cfg; +extern const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg; +extern const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg; +extern const struct iwl_mac_cfg iwl_ax200_mac_cfg; +extern const struct iwl_mac_cfg iwl_ty_mac_cfg; +extern const struct iwl_mac_cfg iwl_so_mac_cfg; +extern const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg; +extern const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg; +extern const struct iwl_mac_cfg iwl_ma_mac_cfg; +extern const struct iwl_mac_cfg iwl_bz_mac_cfg; +extern const struct iwl_mac_cfg iwl_gl_mac_cfg; +extern const struct iwl_mac_cfg iwl_sc_mac_cfg; +extern const struct iwl_mac_cfg iwl_dr_mac_cfg; + +extern const char iwl1000_bgn_name[]; +extern const char iwl1000_bg_name[]; +extern const char iwl100_bgn_name[]; +extern const char iwl100_bg_name[]; +extern const char iwl2000_2bgn_name[]; +extern const char iwl2000_2bgn_d_name[]; +extern const char iwl2030_2bgn_name[]; +extern const char iwl105_bgn_name[]; +extern const char iwl105_bgn_d_name[]; +extern const char iwl135_bgn_name[]; +extern const char iwl5300_agn_name[]; +extern const char iwl5100_bgn_name[]; +extern const char iwl5100_abg_name[]; +extern const char iwl5100_agn_name[]; +extern const char iwl5350_agn_name[]; +extern const char iwl5150_agn_name[]; +extern const char iwl5150_abg_name[]; +extern const char iwl6005_2agn_name[]; +extern const char iwl6005_2abg_name[]; +extern const char 
iwl6005_2bg_name[]; +extern const char iwl6005_2agn_sff_name[]; +extern const char iwl6005_2agn_d_name[]; +extern const char iwl6005_2agn_mow1_name[]; +extern const char iwl6005_2agn_mow2_name[]; +extern const char iwl6030_2agn_name[]; +extern const char iwl6030_2abg_name[]; +extern const char iwl6030_2bgn_name[]; +extern const char iwl6030_2bg_name[]; +extern const char iwl6035_2agn_name[]; +extern const char iwl6035_2agn_sff_name[]; +extern const char iwl1030_bgn_name[]; +extern const char iwl1030_bg_name[]; +extern const char iwl130_bgn_name[]; +extern const char iwl130_bg_name[]; +extern const char iwl6000i_2agn_name[]; +extern const char iwl6000i_2abg_name[]; +extern const char iwl6000i_2bg_name[]; +extern const char iwl6050_2agn_name[]; +extern const char iwl6050_2abg_name[]; +extern const char iwl6150_bgn_name[]; +extern const char iwl6150_bg_name[]; +extern const char iwl6000_3agn_name[]; +extern const char iwl7260_2ac_name[]; +extern const char iwl7260_2n_name[]; +extern const char iwl7260_n_name[]; +extern const char iwl3160_2ac_name[]; +extern const char iwl3160_2n_name[]; +extern const char iwl3160_n_name[]; +extern const char iwl3165_2ac_name[]; +extern const char iwl3168_2ac_name[]; +extern const char iwl7265_2ac_name[]; +extern const char iwl7265_2n_name[]; +extern const char iwl7265_n_name[]; +extern const char iwl8260_2n_name[]; +extern const char iwl8260_2ac_name[]; +extern const char iwl8265_2ac_name[]; +extern const char iwl8275_2ac_name[]; +extern const char iwl4165_2ac_name[]; +extern const char iwl_killer_1435i_name[]; +extern const char iwl_killer_1434_kix_name[]; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -548,129 +633,84 @@ extern const char iwl_ax211_killer_1675s_name[]; extern const char iwl_ax211_killer_1675i_name[]; extern const char iwl_ax411_killer_1690s_name[]; extern const char iwl_ax411_killer_1690i_name[]; +extern const char iwl_ax210_name[]; extern const char iwl_ax211_name[]; -extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; -extern const char iwl_fm_name[]; -extern const char iwl_wh_name[]; -extern const char iwl_gl_name[]; -extern const char iwl_mtp_name[]; -extern const char iwl_dr_name[]; -extern const char iwl_br_name[]; +extern const char iwl_killer_be1750s_name[]; +extern const char iwl_killer_be1750i_name[]; +extern const char iwl_killer_be1750w_name[]; +extern const char iwl_killer_be1750x_name[]; +extern const char iwl_killer_be1790s_name[]; +extern const char iwl_killer_be1790i_name[]; +extern const char iwl_be201_name[]; +extern const char iwl_be200_name[]; +extern const char iwl_be202_name[]; +extern const char iwl_be401_name[]; +extern const char iwl_be213_name[]; +extern const char iwl_killer_be1775s_name[]; +extern const char iwl_killer_be1775i_name[]; +extern const char iwl_be211_name[]; +extern const char iwl_killer_bn1850w2_name[]; +extern const char iwl_killer_bn1850i_name[]; +extern const char iwl_bn201_name[]; +extern const char iwl_be221_name[]; +extern const char iwl_be223_name[]; #if IS_ENABLED(CONFIG_IWLDVM) -extern const struct iwl_cfg iwl5300_agn_cfg; -extern const struct iwl_cfg iwl5100_agn_cfg; -extern const struct iwl_cfg iwl5350_agn_cfg; -extern const struct iwl_cfg iwl5100_bgn_cfg; -extern const struct iwl_cfg iwl5100_abg_cfg; -extern const struct iwl_cfg iwl5150_agn_cfg; -extern const struct iwl_cfg iwl5150_abg_cfg; -extern const struct iwl_cfg iwl6005_2agn_cfg; -extern const struct iwl_cfg iwl6005_2abg_cfg; -extern const struct 
iwl_cfg iwl6005_2bg_cfg; -extern const struct iwl_cfg iwl6005_2agn_sff_cfg; -extern const struct iwl_cfg iwl6005_2agn_d_cfg; -extern const struct iwl_cfg iwl6005_2agn_mow1_cfg; -extern const struct iwl_cfg iwl6005_2agn_mow2_cfg; -extern const struct iwl_cfg iwl1030_bgn_cfg; -extern const struct iwl_cfg iwl1030_bg_cfg; -extern const struct iwl_cfg iwl6030_2agn_cfg; -extern const struct iwl_cfg iwl6030_2abg_cfg; -extern const struct iwl_cfg iwl6030_2bgn_cfg; -extern const struct iwl_cfg iwl6030_2bg_cfg; -extern const struct iwl_cfg iwl6000i_2agn_cfg; -extern const struct iwl_cfg iwl6000i_2abg_cfg; -extern const struct iwl_cfg iwl6000i_2bg_cfg; -extern const struct iwl_cfg iwl6000_3agn_cfg; -extern const struct iwl_cfg iwl6050_2agn_cfg; -extern const struct iwl_cfg iwl6050_2abg_cfg; -extern const struct iwl_cfg iwl6150_bgn_cfg; -extern const struct iwl_cfg iwl6150_bg_cfg; -extern const struct iwl_cfg iwl1000_bgn_cfg; -extern const struct iwl_cfg iwl1000_bg_cfg; -extern const struct iwl_cfg iwl100_bgn_cfg; -extern const struct iwl_cfg iwl100_bg_cfg; -extern const struct iwl_cfg iwl130_bgn_cfg; -extern const struct iwl_cfg iwl130_bg_cfg; -extern const struct iwl_cfg iwl2000_2bgn_cfg; -extern const struct iwl_cfg iwl2000_2bgn_d_cfg; -extern const struct iwl_cfg iwl2030_2bgn_cfg; -extern const struct iwl_cfg iwl6035_2agn_cfg; -extern const struct iwl_cfg iwl6035_2agn_sff_cfg; -extern const struct iwl_cfg iwl105_bgn_cfg; -extern const struct iwl_cfg iwl105_bgn_d_cfg; -extern const struct iwl_cfg iwl135_bgn_cfg; +extern const struct iwl_rf_cfg iwl5300_agn_cfg; +extern const struct iwl_rf_cfg iwl5350_agn_cfg; +extern const struct iwl_rf_cfg iwl5100_n_cfg; +extern const struct iwl_rf_cfg iwl5100_abg_cfg; +extern const struct iwl_rf_cfg iwl5150_agn_cfg; +extern const struct iwl_rf_cfg iwl5150_abg_cfg; +extern const struct iwl_rf_cfg iwl6005_non_n_cfg; +extern const struct iwl_rf_cfg iwl6005_n_cfg; +extern const struct iwl_rf_cfg iwl6030_n_cfg; +extern const struct iwl_rf_cfg iwl6030_non_n_cfg; +extern const struct iwl_rf_cfg iwl6000i_2agn_cfg; +extern const struct iwl_rf_cfg iwl6000i_non_n_cfg; +extern const struct iwl_rf_cfg iwl6000i_non_n_cfg; +extern const struct iwl_rf_cfg iwl6000_3agn_cfg; +extern const struct iwl_rf_cfg iwl6050_2agn_cfg; +extern const struct iwl_rf_cfg iwl6050_2abg_cfg; +extern const struct iwl_rf_cfg iwl6150_bgn_cfg; +extern const struct iwl_rf_cfg iwl6150_bg_cfg; +extern const struct iwl_rf_cfg iwl1000_bgn_cfg; +extern const struct iwl_rf_cfg iwl1000_bg_cfg; +extern const struct iwl_rf_cfg iwl100_bgn_cfg; +extern const struct iwl_rf_cfg iwl100_bg_cfg; +extern const struct iwl_rf_cfg iwl130_bgn_cfg; +extern const struct iwl_rf_cfg iwl130_bg_cfg; +extern const struct iwl_rf_cfg iwl2000_2bgn_cfg; +extern const struct iwl_rf_cfg iwl2030_2bgn_cfg; +extern const struct iwl_rf_cfg iwl6035_2agn_cfg; +extern const struct iwl_rf_cfg iwl105_bgn_cfg; +extern const struct iwl_rf_cfg iwl135_bgn_cfg; #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) -extern const struct iwl_ht_params iwl_22000_ht_params; -extern const struct iwl_cfg iwl7260_2ac_cfg; -extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; -extern const struct iwl_cfg iwl7260_2n_cfg; -extern const struct iwl_cfg iwl7260_n_cfg; -extern const struct iwl_cfg iwl3160_2ac_cfg; -extern const struct iwl_cfg iwl3160_2n_cfg; -extern const struct iwl_cfg iwl3160_n_cfg; -extern const struct iwl_cfg iwl3165_2ac_cfg; -extern const struct iwl_cfg iwl3168_2ac_cfg; -extern const struct iwl_cfg iwl7265_2ac_cfg; -extern const struct 
iwl_cfg iwl7265_2n_cfg; -extern const struct iwl_cfg iwl7265_n_cfg; -extern const struct iwl_cfg iwl7265d_2ac_cfg; -extern const struct iwl_cfg iwl7265d_2n_cfg; -extern const struct iwl_cfg iwl7265d_n_cfg; -extern const struct iwl_cfg iwl8260_2n_cfg; -extern const struct iwl_cfg iwl8260_2ac_cfg; -extern const struct iwl_cfg iwl8265_2ac_cfg; -extern const struct iwl_cfg iwl8275_2ac_cfg; -extern const struct iwl_cfg iwl4165_2ac_cfg; -extern const struct iwl_cfg iwl9260_2ac_cfg; -extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg; -extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg; -extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg; -extern const struct iwl_cfg iwl9560_2ac_cfg_soc; -extern const struct iwl_cfg iwl_qu_b0_hr1_b0; -extern const struct iwl_cfg iwl_qu_c0_hr1_b0; -extern const struct iwl_cfg iwl_quz_a0_hr1_b0; -extern const struct iwl_cfg iwl_qu_b0_hr_b0; -extern const struct iwl_cfg iwl_qu_c0_hr_b0; -extern const struct iwl_cfg iwl_ax200_cfg_cc; -extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; -extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0; -extern const struct iwl_cfg iwl_ax201_cfg_quz_hr; -extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; -extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; -extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; -extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; -extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; -extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; -extern const struct iwl_cfg killer1650x_2ax_cfg; -extern const struct iwl_cfg killer1650w_2ax_cfg; -extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; -extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; -extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; -extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; -extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; -extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; - -extern const struct iwl_cfg iwl_cfg_ma; - -extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; -extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; +extern const struct iwl_rf_cfg iwl7260_cfg; +extern const struct iwl_rf_cfg iwl7260_high_temp_cfg; +extern const struct iwl_rf_cfg iwl3160_cfg; +extern const struct iwl_rf_cfg iwl3165_2ac_cfg; +extern const struct iwl_rf_cfg iwl3168_2ac_cfg; +extern const struct iwl_rf_cfg iwl7265_cfg; +extern const struct iwl_rf_cfg iwl7265d_cfg; +extern const struct iwl_rf_cfg iwl8260_cfg; +extern const struct iwl_rf_cfg iwl8265_cfg; +extern const struct iwl_rf_cfg iwl_rf_jf; +extern const struct iwl_rf_cfg iwl_rf_jf_80mhz; +extern const struct iwl_rf_cfg iwl_rf_hr1; +extern const struct iwl_rf_cfg iwl_rf_hr; +extern const struct iwl_rf_cfg iwl_rf_hr_80mhz; + +extern const struct iwl_rf_cfg iwl_rf_gf; #endif /* CONFIG_IWLMVM */ #if IS_ENABLED(CONFIG_IWLMLD) -extern const struct iwl_ht_params iwl_bz_ht_params; - -extern const struct iwl_ht_params iwl_bz_ht_params; - -extern const struct iwl_cfg iwl_cfg_bz; -extern const struct iwl_cfg iwl_cfg_gl; - -extern const struct iwl_cfg iwl_cfg_sc; -extern const struct iwl_cfg iwl_cfg_sc2; -extern const struct iwl_cfg iwl_cfg_sc2f; -extern const struct iwl_cfg iwl_cfg_dr; -extern const struct iwl_cfg iwl_cfg_br; +extern const struct iwl_rf_cfg iwl_rf_fm; +extern const struct iwl_rf_cfg iwl_rf_fm_160mhz; +#define iwl_rf_wh iwl_rf_fm +#define iwl_rf_wh_160mhz iwl_rf_fm_160mhz +#define iwl_rf_pe iwl_rf_fm #endif /* CONFIG_IWLMLD */ #endif /* __IWL_CONFIG_H__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h index 6111ca970ed2..8c5c0ea46181 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h @@ -2,8 +2,8 @@ /* * Copyright (C) 2018, 2020-2025 Intel Corporation */ -#ifndef __iwl_context_info_file_gen3_h__ -#define __iwl_context_info_file_gen3_h__ +#ifndef __iwl_context_info_file_v2_h__ +#define __iwl_context_info_file_v2_h__ #include "iwl-context-info.h" @@ -58,6 +58,7 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE * upon reset. + * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset */ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1), @@ -74,6 +75,7 @@ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29), + IWL_PRPH_SCRATCH_TOP_RESET = BIT(30), }; /** @@ -248,7 +250,7 @@ struct iwl_prph_info { } __packed; /* PERIPH_INFO_S */ /** - * struct iwl_context_info_gen3 - device INIT configuration + * struct iwl_context_info_v2 - device INIT configuration * @version: version of the context information * @size: size of context information in DWs * @config: context in which the peripheral would execute - a subset of @@ -291,7 +293,7 @@ struct iwl_prph_info { * @prph_scratch_size: the size of the peripheral scratch structure in DWs * @reserved: reserved */ -struct iwl_context_info_gen3 { +struct iwl_context_info_v2 { __le16 version; __le16 size; __le32 config; @@ -321,22 +323,22 @@ struct iwl_context_info_gen3 { __le32 reserved; } __packed; /* IPC_CONTEXT_INFO_S */ -int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, - const struct fw_img *fw); -void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive); +int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct fw_img *img); +void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans); +void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive); -int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_payloads, - const struct iwl_ucode_capabilities *capa); -void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); +int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa); +void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); int -iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct iwl_ucode_capabilities *capa); +iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); void -iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); -int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans, - u32 mbx_addr_0_step, u32 mbx_addr_1_step); -#endif /* __iwl_context_info_file_gen3_h__ */ +iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); +#endif /* __iwl_context_info_file_v2_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index b495eb94d126..7ae0fbdef208 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h @@ -179,9 +179,9 @@ struct iwl_context_info { __le32 reserved2[16]; struct iwl_context_info_dram_nonfseq dram; __le32 reserved3[16]; -} __packed; +} __packed; /* BOOT_LOADER_CONTEXT_INFO_S */ -int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); +int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *img); void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); int iwl_pcie_init_fw_sec(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index c09967e2c68c..0fd452cb94ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -107,6 +107,13 @@ /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) +#define CSR_IPC_STATE (CSR_BASE + 0x110) +#define CSR_IPC_STATE_RESET 0x00000030 +#define CSR_IPC_STATE_RESET_NONE 0 +#define CSR_IPC_STATE_RESET_SW_READY 1 +#define CSR_IPC_STATE_RESET_TOP_READY 2 +#define CSR_IPC_STATE_RESET_TOP_FOLLOWER 3 + #define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114) #define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3 #define CSR_IPC_SLEEP_CONTROL_RESUME 0 @@ -643,7 +650,7 @@ enum msix_hw_int_causes { * HW address related registers * *****************************************************************************/ -#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr) +#define CSR_ADDR_BASE(trans) ((trans)->mac_cfg->base->mac_addr_from_csr) #define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00) #define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04) #define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index ce787326aa69..5c8f6dc9a3e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -503,7 +503,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) int res; if (!iwlwifi_mod_params.enable_ini || - trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) + trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000) return; res = firmware_request_nowarn(&fw, yoyo_bin, dev); @@ -603,11 +603,11 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt, return 0; num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num); - if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { + if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) return -EIO; num_frags = 1; - } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && + } else if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ && alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) { return -EIO; } @@ -1241,7 +1241,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, fwrt->trans->dbg.restart_required = false; - if (fwrt->trans->trans_cfg->device_family == + if (fwrt->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000) { fwrt->trans->dbg.restart_required = true; } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index 76166e1b10e5..99789c7cef3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -3,7 +3,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(C) 2016 Intel Deutschland GmbH - * Copyright(c) 2018, 2023 Intel Corporation + * Copyright(c) 2018, 2023, 2025 Intel Corporation *****************************************************************************/ #ifndef __IWLWIFI_DEVICE_TRACE @@ -54,11 +54,11 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, struct ieee80211_hdr *hdr = NULL; size_t hdr_offset; - if (cmd->cmd != trans->rx_mpdu_cmd) + if (cmd->cmd != trans->conf.rx_mpdu_cmd) return len; hdr_offset = sizeof(struct iwl_cmd_header) + - trans->rx_mpdu_cmd_hdr_size; + trans->conf.rx_mpdu_cmd_hdr_size; if (out_hdr_offset) *out_hdr_offset = hdr_offset; @@ -67,7 +67,8 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, if (!ieee80211_is_data(hdr->frame_control)) return len; /* maybe try to identify EAPOL frames? */ - return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size + + return sizeof(__le32) + sizeof(*cmd) + + trans->conf.rx_mpdu_cmd_hdr_size + ieee80211_hdrlen(hdr->frame_control); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index d36837501e08..9504a0cb8b13 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -176,25 +176,86 @@ static inline char iwl_drv_get_step(int step) static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans) { - return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM; + return CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM; } const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) { char mac_step, rf_step; - const char *rf, *cdb; + const char *mac, *rf, *cdb; if (trans->cfg->fw_name_pre) return trans->cfg->fw_name_pre; - if (WARN_ON(!trans->cfg->fw_name_mac)) - return "unconfigured"; + mac_step = iwl_drv_get_step(trans->info.hw_rev_step); - mac_step = iwl_drv_get_step(trans->hw_rev_step); + switch (CSR_HW_REV_TYPE(trans->info.hw_rev)) { + case IWL_CFG_MAC_TYPE_PU: + mac = "9000-pu"; + mac_step = 'b'; + break; + case IWL_CFG_MAC_TYPE_TH: + mac = "9260-th"; + mac_step = 'b'; + break; + case IWL_CFG_MAC_TYPE_QU: + mac = "Qu"; + break; + case IWL_CFG_MAC_TYPE_CC: + /* special case - no RF since it's fixed (discrete) */ + scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-cc-a0"); + return buf; + case IWL_CFG_MAC_TYPE_QUZ: + mac = "QuZ"; + /* all QuZ use A0 firmware */ + mac_step = 'a'; + break; + case IWL_CFG_MAC_TYPE_SO: + case IWL_CFG_MAC_TYPE_SOF: + mac = "so"; + mac_step = 'a'; + break; + case IWL_CFG_MAC_TYPE_TY: + mac = "ty"; + mac_step = 'a'; + break; + case IWL_CFG_MAC_TYPE_MA: + mac = "ma"; + break; + case IWL_CFG_MAC_TYPE_BZ: + case IWL_CFG_MAC_TYPE_BZ_W: + mac = "bz"; + break; + case IWL_CFG_MAC_TYPE_GL: + mac = "gl"; + break; + case IWL_CFG_MAC_TYPE_SC: + mac = "sc"; + break; + case IWL_CFG_MAC_TYPE_SC2: + mac = "sc2"; + break; + case IWL_CFG_MAC_TYPE_SC2F: + mac = "sc2f"; + break; + case IWL_CFG_MAC_TYPE_BR: + mac = "br"; + break; + case IWL_CFG_MAC_TYPE_DR: + mac = "dr"; + break; + default: + return "unknown-mac"; + } - rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); + rf_step = 
iwl_drv_get_step(CSR_HW_RFID_STEP(trans->info.hw_rf_id)); - switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) { + case IWL_CFG_RF_TYPE_JF1: + case IWL_CFG_RF_TYPE_JF2: + rf = "jf"; + rf_step = 'b'; + break; case IWL_CFG_RF_TYPE_HR1: case IWL_CFG_RF_TYPE_HR2: rf = "hr"; @@ -202,29 +263,26 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) break; case IWL_CFG_RF_TYPE_GF: rf = "gf"; + rf_step = 'a'; break; case IWL_CFG_RF_TYPE_FM: rf = "fm"; break; case IWL_CFG_RF_TYPE_WH: - if (SILICON_Z_STEP == - CSR_HW_RFID_STEP(trans->hw_rf_id)) { - rf = "whtc"; - rf_step = 'a'; - } else { - rf = "wh"; - } + rf = "wh"; + break; + case IWL_CFG_RF_TYPE_PE: + rf = "pe"; break; default: return "unknown-rf"; } - cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : ""; + cdb = CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id) ? "4" : ""; scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-%s-%c0-%s%s-%c0", - trans->cfg->fw_name_mac, mac_step, - rf, cdb, rf_step); + mac, mac_step, rf, cdb, rf_step); return buf; } @@ -233,39 +291,64 @@ IWL_EXPORT_SYMBOL(iwl_drv_get_fwname_pre); static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); +static void iwl_get_ucode_api_versions(struct iwl_trans *trans, + unsigned int *api_min, + unsigned int *api_max) +{ + const struct iwl_family_base_params *base = trans->mac_cfg->base; + const struct iwl_rf_cfg *cfg = trans->cfg; + + if (!base->ucode_api_max) { + *api_min = cfg->ucode_api_min; + *api_max = cfg->ucode_api_max; + return; + } + + if (!cfg->ucode_api_max) { + *api_min = base->ucode_api_min; + *api_max = base->ucode_api_max; + return; + } + + *api_min = max(cfg->ucode_api_min, base->ucode_api_min); + *api_max = min(cfg->ucode_api_max, base->ucode_api_max); +} + static int iwl_request_firmware(struct iwl_drv *drv, bool first) { - const struct iwl_cfg *cfg = drv->trans->cfg; char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; + unsigned int ucode_api_max, ucode_api_min; const char *fw_name_pre; - if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && - (drv->trans->hw_rev_step != SILICON_B_STEP && - drv->trans->hw_rev_step != SILICON_C_STEP)) { + iwl_get_ucode_api_versions(drv->trans, &ucode_api_min, &ucode_api_max); + + if (drv->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 && + (drv->trans->info.hw_rev_step != SILICON_B_STEP && + drv->trans->info.hw_rev_step != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", - drv->trans->hw_rev); + drv->trans->info.hw_rev); return -EINVAL; } fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre); if (first) - drv->fw_index = cfg->ucode_api_max; + drv->fw_index = ucode_api_max; else drv->fw_index--; - if (drv->fw_index < cfg->ucode_api_min) { + if (drv->fw_index < ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); - if (cfg->ucode_api_min == cfg->ucode_api_max) { + if (ucode_api_min == ucode_api_max) { IWL_ERR(drv, "%s-%d is required\n", fw_name_pre, - cfg->ucode_api_max); + ucode_api_max); } else { IWL_ERR(drv, "minimum version required: %s-%d\n", - fw_name_pre, cfg->ucode_api_min); + fw_name_pre, ucode_api_min); IWL_ERR(drv, "maximum version supported: %s-%d\n", - fw_name_pre, cfg->ucode_api_max); + fw_name_pre, ucode_api_max); } IWL_ERR(drv, @@ -1235,7 +1318,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; - if (drv->trans->trans_cfg->device_family < + if (drv->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_22000) 
break; drv->trans->dbg.umac_error_event_table = @@ -1251,7 +1334,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; - if (drv->trans->trans_cfg->device_family < + if (drv->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.lmac_error_event_table[0] = @@ -1373,7 +1456,7 @@ static int iwl_alloc_ucode(struct iwl_drv *drv, static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, - const struct iwl_cfg *cfg) + const struct iwl_rf_cfg *cfg) { IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, @@ -1496,14 +1579,15 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; - const unsigned int api_max = drv->trans->cfg->ucode_api_max; - const unsigned int api_min = drv->trans->cfg->ucode_api_min; + unsigned int api_min, api_max; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; bool usniffer_images = false; bool failure = true; + iwl_get_ucode_api_versions(drv->trans, &api_min, &api_max); + fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; @@ -1697,14 +1781,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = - drv->trans->trans_cfg->base_params->max_event_log_size; + drv->trans->mac_cfg->base->max_event_log_size; fw->init_errlog_ptr = pieces->init_errlog_ptr; fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; if (pieces->inst_evtlog_size) fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = - drv->trans->trans_cfg->base_params->max_event_log_size; + drv->trans->mac_cfg->base->max_event_log_size; fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 854957bdf79d..595300a14639 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2020-2021, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2020-2021, 2023, 2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __iwl_drv_h__ @@ -53,7 +53,7 @@ struct iwl_drv; struct iwl_trans; -struct iwl_cfg; +struct iwl_rf_cfg; /** * iwl_drv_start - start the drv * diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 5c8f1868db64..0f6de08b7473 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023-2025 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fh_h__ @@ -71,7 +71,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, unsigned int chnl) { - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { WARN_ON_ONCE(chnl >= 64); return TFH_TFDQ_CBB_TABLE + 8 * chnl; } @@ -378,14 +378,14 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, * Once the RXF-to-DRAM 
DMA is active, this flag is immediately turned off. */ #define RFH_GEN_STATUS 0xA09808 -#define RFH_GEN_STATUS_GEN3 0xA07824 +#define RFH_GEN_STATUS_AX210 0xA07824 #define RBD_FETCH_IDLE BIT(29) #define SRAM_DMA_IDLE BIT(30) #define RXF_DMA_IDLE BIT(31) /* DMA configuration */ #define RFH_RXF_DMA_CFG 0xA09820 -#define RFH_RXF_DMA_CFG_GEN3 0xA07880 +#define RFH_RXF_DMA_CFG_AX210 0xA07880 /* RB size */ #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ #define RFH_RXF_DMA_RB_SIZE_POS 16 @@ -588,13 +588,12 @@ struct iwl_rb_status { #define TFD_QUEUE_SIZE_MAX (256) -#define TFD_QUEUE_SIZE_MAX_GEN3 (65536) /* cb size is the exponent - 3 */ #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024 -#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4) +#define TFD_QUEUE_BC_SIZE_AX210 1024 +#define TFD_QUEUE_BC_SIZE_BZ (1024 * 4) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 @@ -717,30 +716,19 @@ struct iwl_tfh_tfd { /* Fixed (non-configurable) rx data from phy */ /** - * struct iwlagn_scd_bc_tbl - scheduler byte count table + * struct iwl_bc_tbl_entry - scheduler byte count table entry * base physical address provided by SCD_DRAM_BASE_ADDR * For devices up to 22000: * @tfd_offset: * For devices up to 22000: * 0-12 - tx command byte count * 12-16 - station index - * For 22000: + * For 22000 and on: * 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ -struct iwlagn_scd_bc_tbl { - __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; -} __packed; - -/** - * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3 - * For AX210 and on: - * @tfd_offset: 0-12 - tx command byte count - * 12-13 - number of 64 byte chunks - * 14-16 - reserved - */ -struct iwl_gen3_bc_tbl_entry { +struct iwl_bc_tbl_entry { __le16 tfd_offset; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 0653ca8b974a..80591809164e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include <linux/delay.h> @@ -211,13 +211,13 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV, 1); - else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); - else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); else @@ -260,7 +260,7 @@ struct reg { static int iwl_dump_rfh(struct iwl_trans *trans, char **buf) { int i, q; - int num_q = trans->num_rx_queues; + int num_q = trans->info.num_rxqs; static const u32 rfh_tbl[] = { RFH_RXF_DMA_CFG, RFH_GEN_CFG, @@ -368,7 +368,7 @@ int iwl_dump_fh(struct iwl_trans *trans, 
char **buf) FH_TSSR_TX_ERROR_REG }; - if (trans->trans_cfg->mq_rx_supported) + if (trans->mac_cfg->mq_rx_supported) return iwl_dump_rfh(trans, buf); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -423,7 +423,7 @@ static void iwl_dump_host_monitor_block(struct iwl_trans *trans, static void iwl_dump_host_monitor(struct iwl_trans *trans) { - switch (trans->trans_cfg->device_family) { + switch (trans->mac_cfg->device_family) { case IWL_DEVICE_FAMILY_22000: case IWL_DEVICE_FAMILY_AX210: IWL_ERR(trans, "CSR_RESET = 0x%x\n", @@ -445,11 +445,11 @@ static void iwl_dump_host_monitor(struct iwl_trans *trans) int iwl_finish_nic_init(struct iwl_trans *trans) { - const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg; + const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg; u32 poll_ready; int err; - if (cfg_trans->bisr_workaround) { + if (mac_cfg->bisr_workaround) { /* ensure the TOP FSM isn't still in previous reset */ mdelay(2); } @@ -458,7 +458,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ | CSR_GP_CNTRL_REG_FLAG_MAC_INIT); @@ -469,7 +469,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans) poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY; } - if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000) + if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); /* @@ -484,7 +484,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans) iwl_dump_host_monitor(trans); } - if (cfg_trans->bisr_workaround) { + if (mac_cfg->bisr_workaround) { /* ensure BISR shift has finished */ udelay(200); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 37b3bd62897e..f4833c5fe86e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2021, 2025 Intel Corporation */ #ifndef __iwl_io_h__ #define __iwl_io_h__ @@ -64,38 +64,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf); */ static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs) { - return ofs + trans->trans_cfg->umac_prph_offset; + return ofs + trans->mac_cfg->umac_prph_offset; } static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs) { return iwl_read_prph_no_grab(trans, ofs + - trans->trans_cfg->umac_prph_offset); + trans->mac_cfg->umac_prph_offset); } static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs) { - return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset); + return iwl_read_prph(trans, ofs + trans->mac_cfg->umac_prph_offset); } static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { - iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset, + iwl_write_prph_no_grab(trans, ofs + trans->mac_cfg->umac_prph_offset, val); } static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs, u32 val) { - iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val); + iwl_write_prph(trans, ofs + trans->mac_cfg->umac_prph_offset, val); } static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { return iwl_poll_prph_bit(trans, addr + - 
trans->trans_cfg->umac_prph_offset, + trans->mac_cfg->umac_prph_offset, bits, mask, timeout); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index e30e11b987d5..0592f0f59d1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -336,7 +336,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, } static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, - u32 nvm_flags, const struct iwl_cfg *cfg) + u32 nvm_flags, const struct iwl_rf_cfg *cfg) { u32 flags = IEEE80211_CHAN_NO_HT40; @@ -403,7 +403,7 @@ static int iwl_init_channel_map(struct iwl_trans *trans, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { - const struct iwl_cfg *cfg = trans->cfg; + const struct iwl_rf_cfg *cfg = trans->cfg; struct device *dev = trans->dev; int ch_idx; int n_channels = 0; @@ -504,7 +504,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, struct ieee80211_sta_vht_cap *vht_cap, u8 tx_chains, u8 rx_chains) { - const struct iwl_cfg *cfg = trans->cfg; + const struct iwl_rf_cfg *cfg = trans->cfg; int num_rx_ants = num_of_ant(rx_chains); int num_tx_ants = num_of_ant(tx_chains); @@ -517,7 +517,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, IEEE80211_VHT_MAX_AMPDU_1024K << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - if (!trans->cfg->ht_params->stbc) + if (!trans->cfg->ht_params.stbc) vht_cap->cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK; if (data->vht160_supported) @@ -527,7 +527,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, if (cfg->vht_mu_mimo_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; - if (cfg->ht_params->ldpc) + if (cfg->ht_params.ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; if (data->sku_cap_mimo_disabled) { @@ -535,21 +535,21 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, num_tx_ants = 1; } - if (trans->cfg->ht_params->stbc && num_tx_ants > 1) + if (trans->cfg->ht_params.stbc && num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: - if (trans->trans_cfg->mq_rx_supported) + if (trans->mac_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_2K: - if (trans->trans_cfg->mq_rx_supported) + if (trans->mac_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else @@ -920,8 +920,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, { bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO)); - bool slow_pcie = (!trans->trans_cfg->integrated && - trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB); + bool slow_pcie = (!trans->mac_cfg->integrated && + trans->info.pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB); if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be) iftype_data->eht_cap.has_eht = false; @@ -948,7 +948,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); break; case 
NL80211_BAND_6GHZ: - if (!trans->reduced_cap_sku) { + if (!trans->reduced_cap_sku && + (!trans->cfg->bw_limit || trans->cfg->bw_limit >= 320)) { iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= @@ -1035,11 +1036,11 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2); } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; - switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_FM: case IWL_CFG_RF_TYPE_WH: @@ -1051,7 +1052,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, break; } - if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL && iftype_data->eht_cap.has_eht) { iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | @@ -1080,13 +1081,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BCAST_TWT; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 && !is_ap) { iftype_data->vendor_elems.data = iwl_vendor_caps; iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps); } - if (!trans->cfg->ht_params->stbc) { + if (!trans->cfg->ht_params.stbc) { iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= ~IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &= @@ -1098,19 +1099,23 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; } - if (trans->no_160) + if (trans->cfg->bw_limit && trans->cfg->bw_limit < 160) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; - if (trans->reduced_cap_sku) { + if ((trans->cfg->bw_limit && trans->cfg->bw_limit < 320) || + trans->reduced_cap_sku) { memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; + } + + if (trans->reduced_cap_sku) { iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; - iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= - ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; } } @@ -1246,7 +1251,7 @@ static void iwl_init_sbands(struct iwl_trans *trans, n_used, n_channels); } -static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, +static int iwl_get_sku(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) @@ -1255,7 +1260,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000)); } -static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) +static int iwl_get_nvm_version(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw) { if (cfg->nvm_type != 
IWL_NVM_EXT) return le16_to_cpup(nvm_sw + NVM_VERSION); @@ -1264,7 +1269,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) NVM_VERSION_EXT_NVM)); } -static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, +static int iwl_get_radio_cfg(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) @@ -1274,7 +1279,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, } -static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) +static int iwl_get_n_hw_addrs(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw) { int n_hw_addr; @@ -1286,7 +1291,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) return n_hw_addr & N_HW_ADDR_MASK; } -static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, +static void iwl_set_radio_cfg(const struct iwl_rf_cfg *cfg, struct iwl_nvm_data *data, u32 radio_cfg) { @@ -1345,7 +1350,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, } static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, - const struct iwl_cfg *cfg, + const struct iwl_rf_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, const __be16 *nvm_hw) @@ -1394,11 +1399,12 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, } static int iwl_set_hw_address(struct iwl_trans *trans, - const struct iwl_cfg *cfg, + const struct iwl_rf_cfg *cfg, struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { - if (cfg->mac_addr_from_csr) { + const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg; + if (mac_cfg->base->mac_addr_from_csr) { iwl_set_hw_address_from_csr(trans, data); } else if (cfg->nvm_type != IWL_NVM_EXT) { const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); @@ -1428,7 +1434,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans, } static bool -iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const __be16 *nvm_hw) { /* @@ -1440,7 +1446,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, * in 5GHz otherwise the FW will throw a sysassert when we try * to use them. */ - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* * Unlike the other sections in the NVM, the hw * section uses big-endian. 
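
Editor's note: the iwl-nvm-parse.c hunks above replace the old no_160/reduced-capability special cases with the per-RF bw_limit field (and switch ht_params from a pointer to an embedded struct). The sketch below only restates the bw_limit gating visible in the iwl_nvm_fixup_sband_iftd() hunk; it is not part of the patch, and iwl_bw_limited()/example_cap_gating() are hypothetical names used for illustration.

	/*
	 * Illustrative sketch, not patch content: bw_limit == 0 means "no
	 * limit"; otherwise HE 160 MHz and EHT 320 MHz advertisement is
	 * trimmed, mirroring the checks added above.
	 */
	#include <net/cfg80211.h>
	#include "iwl-trans.h"

	static bool iwl_bw_limited(const struct iwl_trans *trans,
				   unsigned int mhz)
	{
		return trans->cfg->bw_limit && trans->cfg->bw_limit < mhz;
	}

	static void example_cap_gating(struct iwl_trans *trans,
				       struct ieee80211_sband_iftype_data *iftype_data)
	{
		/* cap limited below 160 MHz: drop HE 160 MHz in 5 GHz */
		if (iwl_bw_limited(trans, 160))
			iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
				~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;

		/* cap limited below 320 MHz (or reduced SKU): drop EHT 320 MHz MCS set */
		if (iwl_bw_limited(trans, 320) || trans->reduced_cap_sku)
			memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
			       sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
	}

The apparent design choice is to describe bandwidth limits as a single numeric field on the RF config rather than scattering per-feature booleans, so the same check covers HE and EHT paths.
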
@@ -1460,7 +1466,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, } struct iwl_nvm_data * -iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant) { @@ -1524,7 +1530,7 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); struct iwl_nvm_data * -iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, @@ -1624,7 +1630,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, struct iwl_reg_capa reg_capa, - const struct iwl_cfg *cfg) + const struct iwl_rf_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1739,10 +1745,12 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver) } struct ieee80211_regdomain * -iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_mcc_info(struct iwl_trans *trans, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info, u32 cap, u8 resp_ver) { + const struct iwl_rf_cfg *cfg = trans->cfg; + struct device *dev = trans->dev; int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; @@ -1997,8 +2005,8 @@ int iwl_read_external_nvm(struct iwl_trans *trans, le32_to_cpu(dword_buff[3])); /* nvm file validation, dword_buff[2] holds the file version */ - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 && - trans->hw_rev_step == SILICON_C_STEP && + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_8000 && + trans->info.hw_rev_step == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; @@ -2065,7 +2073,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans, break; } - iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size); + iwl_nvm_fixups(trans->info.hw_id, section_id, temp, section_size); kfree(nvm_sections[section_id].data); nvm_sections[section_id].data = temp; @@ -2168,7 +2176,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); - if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM) + if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM) nvm->sku_cap_11be_enable = true; /* Initialize PHY sku data */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 0c6c3fb8c6dd..9ce9fa4e78fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2024 Intel Corporation + * Copyright (C) 2005-2015, 2018-2025 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -30,7 +30,7 @@ enum iwl_nvm_sbands_flags { * later with iwl_free_nvm_data(). 
*/ struct iwl_nvm_data * -iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, @@ -46,9 +46,17 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, * accordingly. An ERR_PTR is returned on error. * If not given to the regulatory core, the user is responsible for freeing * the regdomain returned here with kfree. + * + * @trans: the transport + * @num_of_ch: the number of channels + * @channels: channel map + * @fw_mcc: firmware country code + * @geo_info: geo info value + * @cap: capability + * @resp_ver: FW response version */ struct ieee80211_regdomain * -iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_mcc_info(struct iwl_trans *trans, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info, u32 cap, u8 resp_ver); @@ -87,7 +95,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data */ struct iwl_nvm_data * -iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c index b3c25acd3691..ec312c90ff85 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023, 2025 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #include <linux/types.h> @@ -42,7 +42,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, enum nl80211_band band, u8 tx_chains, u8 rx_chains) { - const struct iwl_cfg *cfg = trans->cfg; + const struct iwl_rf_cfg *cfg = trans->cfg; int max_bit_rate = 0; tx_chains = hweight8(tx_chains); @@ -53,7 +53,8 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, if (!(data->sku_cap_11n_enable) || (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || - !cfg->ht_params) { + /* there are no devices with HT but without HT40 entirely */ + !cfg->ht_params.ht40_bands) { ht_info->ht_supported = false; return; } @@ -64,17 +65,17 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; - if (cfg->ht_params->stbc) { + if (cfg->ht_params.stbc) { ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (tx_chains > 1) ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; } - if (cfg->ht_params->ldpc) + if (cfg->ht_params.ldpc) ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; - if (trans->trans_cfg->mq_rx_supported || + if (trans->mac_cfg->mq_rx_supported || iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; @@ -90,13 +91,13 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, if (rx_chains >= 3) ht_info->mcs.rx_mask[2] = 0xFF; - if (cfg->ht_params->ht_greenfield_support) + if (cfg->ht_params.ht_greenfield_support) ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; ht_info->cap |= IEEE80211_HT_CAP_SGI_20; max_bit_rate = MAX_BIT_RATE_20_MHZ; - if (cfg->ht_params->ht40_bands & BIT(band)) { + if 
(cfg->ht_params.ht40_bands & BIT(band)) { ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_info->cap |= IEEE80211_HT_CAP_SGI_40; max_bit_rate = MAX_BIT_RATE_40_MHZ; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 34eca1a568ea..5dc299296d6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2024-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -17,7 +17,7 @@ struct sk_buff; struct iwl_device_cmd; struct iwl_rx_cmd_buffer; struct iwl_fw; -struct iwl_cfg; +struct iwl_rf_cfg; /** * DOC: Operational mode - what is it ? @@ -52,12 +52,20 @@ struct iwl_cfg; * any debug collection must happen synchronously as * the device will be shut down * @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full + * @IWL_ERR_TYPE_TOP_RESET_BY_BT: TOP reset initiated by BT + * @IWL_ERR_TYPE_TOP_FATAL_ERROR: TOP fatal error + * @IWL_ERR_TYPE_TOP_RESET_FAILED: TOP reset failed + * @IWL_ERR_TYPE_DEBUGFS: error/reset indication from debugfs */ enum iwl_fw_error_type { IWL_ERR_TYPE_IRQ, IWL_ERR_TYPE_NMI_FORCED, IWL_ERR_TYPE_RESET_HS_TIMEOUT, IWL_ERR_TYPE_CMD_QUEUE_FULL, + IWL_ERR_TYPE_TOP_RESET_BY_BT, + IWL_ERR_TYPE_TOP_FATAL_ERROR, + IWL_ERR_TYPE_TOP_RESET_FAILED, + IWL_ERR_TYPE_DEBUGFS, }; /** @@ -142,7 +150,7 @@ struct iwl_fw_error_dump_mode { */ struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, - const struct iwl_cfg *cfg, + const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir); void (*stop)(struct iwl_op_mode *op_mode); @@ -242,6 +250,9 @@ static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode, { might_sleep(); + if (WARN_ON(mode->type == IWL_ERR_TYPE_TOP_RESET_BY_BT)) + return; + if (op_mode->ops->dump_error) op_mode->ops->dump_error(op_mode, mode); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index e7b2e08645ef..8a40801cf0dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -15,7 +15,7 @@ #include <linux/dmapool.h> #include "fw/api/commands.h" #include "pcie/internal.h" -#include "iwl-context-info-gen3.h" +#include "iwl-context-info-v2.h" struct iwl_trans_dev_restart_data { struct list_head list; @@ -82,14 +82,14 @@ void iwl_trans_free_restart_list(void) struct iwl_trans_reprobe { struct device *dev; - struct work_struct work; + struct delayed_work work; }; static void iwl_trans_reprobe_wk(struct work_struct *wk) { struct iwl_trans_reprobe *reprobe; - reprobe = container_of(wk, typeof(*reprobe), work); + reprobe = container_of(wk, typeof(*reprobe), work.work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); @@ -98,6 +98,31 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } +static void iwl_trans_schedule_reprobe(struct iwl_trans *trans, + unsigned int delay_ms) +{ + struct iwl_trans_reprobe *reprobe; + + /* + * get a module reference to avoid doing this while unloading + * anyway and to avoid scheduling a work with code that's + * being removed. 
+ */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, "Module is being unloaded - abort\n"); + return; + } + + reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL); + if (!reprobe) { + module_put(THIS_MODULE); + return; + } + reprobe->dev = get_device(trans->dev); + INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk); + schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms)); +} + #define IWL_TRANS_RESET_OK_TIME 7 /* seconds */ static enum iwl_reset_mode @@ -106,18 +131,46 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) struct iwl_trans_dev_restart_data *data; enum iwl_reset_mode at_least = 0; unsigned int index; - static const enum iwl_reset_mode escalation_list[] = { + static const enum iwl_reset_mode escalation_list_old[] = { + IWL_RESET_MODE_SW_RESET, + IWL_RESET_MODE_REPROBE, + IWL_RESET_MODE_REPROBE, + IWL_RESET_MODE_FUNC_RESET, + IWL_RESET_MODE_PROD_RESET, + }; + static const enum iwl_reset_mode escalation_list_sc[] = { IWL_RESET_MODE_SW_RESET, IWL_RESET_MODE_REPROBE, IWL_RESET_MODE_REPROBE, IWL_RESET_MODE_FUNC_RESET, - /* FIXME: add TOP reset */ + IWL_RESET_MODE_TOP_RESET, IWL_RESET_MODE_PROD_RESET, - /* FIXME: add TOP reset */ + IWL_RESET_MODE_TOP_RESET, IWL_RESET_MODE_PROD_RESET, - /* FIXME: add TOP reset */ + IWL_RESET_MODE_TOP_RESET, IWL_RESET_MODE_PROD_RESET, }; + const enum iwl_reset_mode *escalation_list; + size_t escalation_list_size; + + /* used by TOP fatal error/TOP reset */ + if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_FAILED) + return IWL_RESET_MODE_PROD_RESET; + + if (trans->request_top_reset) { + trans->request_top_reset = 0; + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) + return IWL_RESET_MODE_TOP_RESET; + return IWL_RESET_MODE_PROD_RESET; + } + + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) { + escalation_list = escalation_list_sc; + escalation_list_size = ARRAY_SIZE(escalation_list_sc); + } else { + escalation_list = escalation_list_old; + escalation_list_size = ARRAY_SIZE(escalation_list_old); + } if (trans->restart.during_reset) at_least = IWL_RESET_MODE_REPROBE; @@ -132,8 +185,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) data->restart_count = 0; index = data->restart_count; - if (index >= ARRAY_SIZE(escalation_list)) { - index = ARRAY_SIZE(escalation_list) - 1; + if (index >= escalation_list_size) { + index = escalation_list_size - 1; if (!data->backoff) { data->backoff = true; return IWL_RESET_MODE_BACKOFF; @@ -144,15 +197,21 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) return max(at_least, escalation_list[index]); } +#define IWL_TRANS_TOP_FOLLOWER_WAIT 180 /* ms */ + #define IWL_TRANS_RESET_DELAY (HZ * 60) static void iwl_trans_restart_wk(struct work_struct *wk) { struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk.work); - struct iwl_trans_reprobe *reprobe; enum iwl_reset_mode mode; + if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_BY_BT) { + iwl_trans_schedule_reprobe(trans, IWL_TRANS_TOP_FOLLOWER_WAIT); + return; + } + if (!trans->op_mode) return; @@ -187,31 +246,19 @@ static void iwl_trans_restart_wk(struct work_struct *wk) iwl_trans_inc_restart_count(trans->dev); switch (mode) { + case IWL_RESET_MODE_TOP_RESET: + trans->do_top_reset = 1; + IWL_ERR(trans, "Device error - TOP reset\n"); + fallthrough; case IWL_RESET_MODE_SW_RESET: - IWL_ERR(trans, "Device error - SW reset\n"); + if (mode == IWL_RESET_MODE_SW_RESET) + IWL_ERR(trans, "Device error - SW reset\n"); iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type); break; case 
IWL_RESET_MODE_REPROBE: IWL_ERR(trans, "Device error - reprobe!\n"); - /* - * get a module reference to avoid doing this while unloading - * anyway and to avoid scheduling a work with code that's - * being removed. - */ - if (!try_module_get(THIS_MODULE)) { - IWL_ERR(trans, "Module is being unloaded - abort\n"); - return; - } - - reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL); - if (!reprobe) { - module_put(THIS_MODULE); - return; - } - reprobe->dev = get_device(trans->dev); - INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk); - schedule_work(&reprobe->work); + iwl_trans_schedule_reprobe(trans, 0); break; default: iwl_trans_pcie_reset(trans, mode); @@ -221,7 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk) struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_cfg_trans_params *cfg_trans) + const struct iwl_mac_cfg *mac_cfg) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -232,7 +279,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, if (!trans) return NULL; - trans->trans_cfg = cfg_trans; + trans->mac_cfg = mac_cfg; #ifdef CONFIG_LOCKDEP lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", @@ -240,7 +287,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, #endif trans->dev = dev; - trans->num_rx_queues = 1; INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk); @@ -251,14 +297,18 @@ int iwl_trans_init(struct iwl_trans *trans) { int txcmd_size, txcmd_align; - if (!trans->trans_cfg->gen2) { - txcmd_size = sizeof(struct iwl_tx_cmd); + /* check if name/num_rx_queues were set as a proxy for info being set */ + if (WARN_ON(!trans->info.name || !trans->info.num_rxqs)) + return -EINVAL; + + if (!trans->mac_cfg->gen2) { + txcmd_size = sizeof(struct iwl_tx_cmd_v6); txcmd_align = sizeof(void *); - } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { - txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_v9); txcmd_align = 64; } else { - txcmd_size = sizeof(struct iwl_tx_cmd_gen3); + txcmd_size = sizeof(struct iwl_tx_cmd); txcmd_align = 128; } @@ -266,7 +316,7 @@ int iwl_trans_init(struct iwl_trans *trans) txcmd_size += 36; /* biggest possible 802.11 header */ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ - if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) + if (WARN_ON(trans->mac_cfg->gen2 && txcmd_size >= txcmd_align)) return -EINVAL; snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), @@ -278,9 +328,6 @@ int iwl_trans_init(struct iwl_trans *trans) if (!trans->dev_cmd_pool) return -ENOMEM; - /* Initialize the wait queue for commands */ - init_waitqueue_head(&trans->wait_command_queue); - return 0; } @@ -298,17 +345,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) test_bit(STATUS_RFKILL_OPMODE, &trans->status))) return -ERFKILL; - /* - * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this - * bit is set early in the D3 flow, before we send all the commands - * that configure the firmware for D3 operation (power, patterns, ...) - * and we don't want to flag all those with CMD_SEND_IN_D3. - * So use the system_pm_mode instead. The only command sent after - * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with - * CMD_SEND_IN_D3. 
- */ - if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && - !(cmd->flags & CMD_SEND_IN_D3))) + if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status))) return -EHOSTDOWN; if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) @@ -321,7 +358,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (!(cmd->flags & CMD_ASYNC)) lock_map_acquire_read(&trans->sync_cmd_lockdep_map); - if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) { + if (trans->conf.wide_cmd_header && !iwl_cmd_groupid(cmd->id)) { if (cmd->id != REPLY_ERROR) cmd->id = DEF_ID(cmd->id); } @@ -365,11 +402,12 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id) grp = iwl_cmd_groupid(id); cmd = iwl_cmd_opcode(id); - if (!trans->command_groups || grp >= trans->command_groups_size || - !trans->command_groups[grp].arr) + if (!trans->conf.command_groups || + grp >= trans->conf.command_groups_size || + !trans->conf.command_groups[grp].arr) return "UNKNOWN"; - arr = &trans->command_groups[grp]; + arr = &trans->conf.command_groups[grp]; ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp); if (!ret) return "UNKNOWN"; @@ -377,37 +415,29 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id) } IWL_EXPORT_SYMBOL(iwl_get_cmd_string); -int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans) +void iwl_trans_op_mode_enter(struct iwl_trans *trans, + struct iwl_op_mode *op_mode) { - int i, j; - const struct iwl_hcmd_arr *arr; + trans->op_mode = op_mode; - for (i = 0; i < trans->command_groups_size; i++) { - arr = &trans->command_groups[i]; - if (!arr->arr) - continue; - for (j = 0; j < arr->size - 1; j++) - if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id) - return -1; - } - return 0; -} -IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted); + if (WARN_ON(trans->conf.n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) + trans->conf.n_no_reclaim_cmds = + ARRAY_SIZE(trans->conf.no_reclaim_cmds); -void iwl_trans_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) -{ - trans->op_mode = trans_cfg->op_mode; + WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd); - iwl_trans_pcie_configure(trans, trans_cfg); - WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); + iwl_trans_pcie_op_mode_enter(trans); } -IWL_EXPORT_SYMBOL(iwl_trans_configure); +IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter); int iwl_trans_start_hw(struct iwl_trans *trans) { might_sleep(); + clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status); + /* opmode may not resume if it detects errors */ + clear_bit(STATUS_SUSPENDED, &trans->status); + return iwl_trans_pcie_start_hw(trans); } IWL_EXPORT_SYMBOL(iwl_trans_start_hw); @@ -421,6 +451,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans) cancel_delayed_work_sync(&trans->restart.wk); trans->op_mode = NULL; + memset(&trans->conf, 0, sizeof(trans->conf)); trans->state = IWL_TRANS_NO_FW; } @@ -497,18 +528,31 @@ IWL_EXPORT_SYMBOL(iwl_trans_dump_data); int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { + int err; + might_sleep(); - return iwl_trans_pcie_d3_suspend(trans, test, reset); + err = iwl_trans_pcie_d3_suspend(trans, test, reset); + + if (!err) + set_bit(STATUS_SUSPENDED, &trans->status); + + return err; } IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend); int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { + int err; + might_sleep(); - return iwl_trans_pcie_d3_resume(trans, status, test, reset); + err = iwl_trans_pcie_d3_resume(trans, status, test, reset); + + 
clear_bit(STATUS_SUSPENDED, &trans->status); + + return err; } IWL_EXPORT_SYMBOL(iwl_trans_d3_resume); @@ -558,34 +602,39 @@ iwl_trans_release_nic_access(struct iwl_trans *trans) } IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); -void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_fw_alive(struct iwl_trans *trans) { might_sleep(); trans->state = IWL_TRANS_FW_ALIVE; - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_trans_pcie_gen2_fw_alive(trans); else - iwl_trans_pcie_fw_alive(trans, scd_addr); + iwl_trans_pcie_fw_alive(trans); } IWL_EXPORT_SYMBOL(iwl_trans_fw_alive); -int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, - bool run_in_rfkill) +int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw, + enum iwl_ucode_type ucode_type, bool run_in_rfkill) { + const struct fw_img *img; int ret; might_sleep(); - WARN_ON_ONCE(!trans->rx_mpdu_cmd); + img = iwl_get_ucode_image(fw, ucode_type); + if (!img) + return -EINVAL; clear_bit(STATUS_FW_ERROR, &trans->status); - if (trans->trans_cfg->gen2) - ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill); + if (trans->mac_cfg->gen2) + ret = iwl_trans_pcie_gen2_start_fw(trans, fw, img, + run_in_rfkill); else - ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill); + ret = iwl_trans_pcie_start_fw(trans, fw, img, + run_in_rfkill); if (ret == 0) trans->state = IWL_TRANS_FW_STARTED; @@ -626,7 +675,7 @@ void iwl_trans_stop_device(struct iwl_trans *trans) iwl_op_mode_dump_error(trans->op_mode, &mode); } - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_trans_pcie_gen2_stop_device(trans); else iwl_trans_pcie_stop_device(trans); @@ -645,7 +694,7 @@ int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, "bad state = %d\n", trans->state)) return -EIO; - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue); return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue); @@ -778,14 +827,14 @@ int iwl_trans_load_pnvm(struct iwl_trans *trans, const struct iwl_pnvm_image *pnvm_data, const struct iwl_ucode_capabilities *capa) { - return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa); + return iwl_trans_pcie_ctx_info_v2_load_pnvm(trans, pnvm_data, capa); } IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm); void iwl_trans_set_pnvm(struct iwl_trans *trans, const struct iwl_ucode_capabilities *capa) { - iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa); + iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa); } IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm); @@ -793,7 +842,7 @@ int iwl_trans_load_reduce_power(struct iwl_trans *trans, const struct iwl_pnvm_image *payloads, const struct iwl_ucode_capabilities *capa) { - return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads, + return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads, capa); } IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power); @@ -801,6 +850,6 @@ IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power); void iwl_trans_set_reduce_power(struct iwl_trans *trans, const struct iwl_ucode_capabilities *capa) { - iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa); + iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa); } IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 44a249a753ec..012b1e44bce3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -109,16 +109,12 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * the response. The caller needs to call iwl_free_resp when done. * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill. * @CMD_BLOCK_TXQS: Block TXQs while the comment is executing. - * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to - * SUSPEND and RESUME commands. We are in D3 mode when we set - * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3. */ enum CMD_MODE { CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), CMD_SEND_IN_RFKILL = BIT(2), CMD_BLOCK_TXQS = BIT(3), - CMD_SEND_IN_D3 = BIT(4), }; #define CMD_MODE_BITS 5 @@ -304,6 +300,10 @@ enum iwl_d3_status { * via iwl_trans_finish_sw_reset() * @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump * the firmware state yet + * @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't + * attempt another reset yet + * @STATUS_SUSPENDED: device is suspended, don't send commands that + * aren't marked accordingly */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -317,6 +317,8 @@ enum iwl_trans_status { STATUS_SUPPRESS_CMD_ERROR_ONCE, STATUS_IN_SW_RESET, STATUS_RESET_PENDING, + STATUS_TRANS_RESET_IN_PROGRESS, + STATUS_SUSPENDED, }; static inline int @@ -388,7 +390,8 @@ struct iwl_dump_sanitize_ops { /** * struct iwl_trans_config - transport configuration * - * @op_mode: pointer to the upper layer. + * These values should be set before iwl_trans_op_mode_enter(). + * * @cmd_queue: the index of the command queue. * Must be set before start_fw. * @cmd_fifo: the fifo for host commands @@ -399,8 +402,6 @@ struct iwl_dump_sanitize_ops { * @n_no_reclaim_cmds: # of commands in list * @rx_buf_size: RX buffer size needed for A-MSDUs * if unset 4k will be the RX buffer size - * @bc_table_dword: set to true if the BC table expects the byte count to be - * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @command_groups: array of command groups, each member is an array of the * commands in the group; for debugging only @@ -411,17 +412,24 @@ struct iwl_dump_sanitize_ops { * @queue_alloc_cmd_ver: queue allocation command version, set to 0 * for using the older SCD_QUEUE_CFG, set to the version of * SCD_QUEUE_CONFIG_CMD otherwise. 
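To make the ownership change concrete: the op mode now fills these fields directly in trans->conf and then calls iwl_trans_op_mode_enter(). Below is a condensed sketch of that flow; the wrapper name is invented for illustration, while the individual assignments are lifted from the iwl_mld_configure_trans() hunk later in this patch.

/* illustrative wrapper; assignments mirror iwl_mld_configure_trans() below */
static void example_enter_op_mode(struct iwl_trans *trans,
                                  struct iwl_op_mode *op_mode)
{
        trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
        trans->conf.command_groups = iwl_mld_groups;
        trans->conf.command_groups_size = ARRAY_SIZE(iwl_mld_groups);
        trans->conf.wide_cmd_header = true;
        trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
        trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);

        /* validates the conf (e.g. n_no_reclaim_cmds) and hands it to PCIe */
        iwl_trans_op_mode_enter(trans, op_mode);
}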
+ * @wide_cmd_header: true when ucode supports wide command header format + * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before + * starting the firmware, used for tracing + * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the + * start of the 802.11 header in the @rx_mpdu_cmd + * @dsbr_urm_fw_dependent: switch to URM based on fw settings + * @dsbr_urm_permanent: switch to URM permanently + * @mbx_addr_0_step: step address data 0 + * @mbx_addr_1_step: step address data 1 + * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used */ struct iwl_trans_config { - struct iwl_op_mode *op_mode; - u8 cmd_queue; u8 cmd_fifo; - const u8 *no_reclaim_cmds; - unsigned int n_no_reclaim_cmds; + u8 n_no_reclaim_cmds; + u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; enum iwl_amsdu_size rx_buf_size; - bool bc_table_dword; bool scd_set_active; const struct iwl_hcmd_arr *command_groups; int command_groups_size; @@ -429,6 +437,16 @@ struct iwl_trans_config { u8 cb_data_offs; bool fw_reset_handshake; u8 queue_alloc_cmd_ver; + + bool wide_cmd_header; + u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; + + u8 dsbr_urm_fw_dependent:1, + dsbr_urm_permanent:1, + ext_32khz_clock_valid:1; + + u32 mbx_addr_0_step; + u32 mbx_addr_1_step; }; struct iwl_trans_dump_data { @@ -514,23 +532,6 @@ enum iwl_trans_state { */ /** - * enum iwl_plat_pm_mode - platform power management mode - * - * This enumeration describes the device's platform power management - * behavior when in system-wide suspend (i.e WoWLAN). - * - * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this - * device. In system-wide suspend mode, it means that the all - * connections will be closed automatically by mac80211 before - * the platform is suspended. - * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN). - */ -enum iwl_plat_pm_mode { - IWL_PLAT_PM_MODE_DISABLED, - IWL_PLAT_PM_MODE_D3, -}; - -/** * enum iwl_ini_cfg_state * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded @@ -820,106 +821,90 @@ struct iwl_txq { }; /** + * struct iwl_trans_info - transport info for outside use + * @name: the device name + * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. + * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. + * @hw_rev: the revision data of the HW + * @hw_rev_step: The mac step of the HW + * @hw_rf_id: the device RF ID + * @hw_cnv_id: the device CNV ID + * @hw_crf_id: the device CRF ID + * @hw_wfpm_id: the device wfpm ID + * @hw_id: the ID of the device / sub-device + * Bits 0:15 represent the sub-device ID + * Bits 16:31 represent the device ID. 
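A small illustration of the @hw_id packing described above: iwl_trans_get_device_id(), added near the end of this header by the patch, extracts the upper half; the sub-device accessor shown next to it is a hypothetical companion included only to spell out the lower half.

static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
{
        return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}

/* hypothetical helper, not part of this patch */
static inline u16 iwl_trans_get_subdevice_id(struct iwl_trans *trans)
{
        return u32_get_bits(trans->info.hw_id, GENMASK(15, 0));
}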
+ * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), + * only valid for discrete (not integrated) NICs + * @num_rxqs: number of RX queues allocated by the transport + */ +struct iwl_trans_info { + const char *name; + u32 max_skb_frags; + u32 hw_rev; + u32 hw_rev_step; + u32 hw_rf_id; + u32 hw_crf_id; + u32 hw_cnv_id; + u32 hw_wfpm_id; + u32 hw_id; + u8 pcie_link_speed; + u8 num_rxqs; +}; + +/** * struct iwl_trans - transport common data * * @csme_own: true if we couldn't get ownership on the device * @op_mode: pointer to the op_mode - * @trans_cfg: the trans-specific configuration part + * @mac_cfg: the trans-specific configuration part * @cfg: pointer to the configuration * @drv: pointer to iwl_drv + * @conf: configuration set by the opmode before enter * @state: current device state * @status: a bit-mask of transport status flags * @dev: pointer to struct device * that represents the device - * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. - * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. - * @hw_rf_id: a u32 with the device RF ID - * @hw_cnv_id: a u32 with the device CNV ID - * @hw_crf_id: a u32 with the device CRF ID - * @hw_wfpm_id: a u32 with the device wfpm ID - * @hw_id: a u32 with the ID of the device / sub-device. - * Set during transport allocation. - * @hw_id_str: a string with info about HW ID. Set during transport allocation. - * @sku_id: the SKU identifier (for PNVM matching) + * @info: device information for use by other layers * @pnvm_loaded: indicates PNVM was loaded - * @hw_rev: the revision data of the HW - * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed * @reduce_power_loaded: indicates reduced power section was loaded * @failed_to_load_reduce_power_image: set to true if pnvm loading failed - * @command_groups: pointer to command group name list array - * @command_groups_size: array size of @command_groups - * @wide_cmd_header: true when ucode supports wide command header format - * @wait_command_queue: wait queue for sync commands - * @num_rx_queues: number of RX queues allocated by the transport; - * the transport must set this before calling iwl_drv_start() - * @iml_len: the length of the image loader - * @iml: a pointer to the image loader itself * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. * @dev_cmd_pool_name: name for the TX command allocation pool * @dbgfs_dir: iwlwifi debugfs base dir for this device * @sync_cmd_lockdep_map: lockdep map for checking sync commands - * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before - * starting the firmware, used for tracing - * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the - * start of the 802.11 header in the @rx_mpdu_cmd * @dbg: additional debug data, see &struct iwl_trans_debug * @init_dram: FW initialization DMA data - * @system_pm_mode: the system-wide power management mode in use. - * This mode is set dynamically, depending on the WoWLAN values - * configured from the userspace at runtime. 
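With @system_pm_mode removed, suspend state is tracked solely through the STATUS_SUSPENDED bit that iwl_trans_d3_suspend()/iwl_trans_d3_resume() set and clear, and that iwl_trans_send_cmd() now checks before rejecting host commands with -EHOSTDOWN. A minimal sketch of the replacement check; the helper name is made up, the patch simply open-codes the test_bit().

static inline bool iwl_trans_is_suspended(struct iwl_trans *trans)
{
        return test_bit(STATUS_SUSPENDED, &trans->status);
}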
- * @name: the device name - * @mbx_addr_0_step: step address data 0 - * @mbx_addr_1_step: step address data 1 - * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), - * only valid for discrete (not integrated) NICs - * @invalid_tx_cmd: invalid TX command buffer * @reduced_cap_sku: reduced capability supported SKU - * @no_160: device not supporting 160 MHz * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz * @restart: restart worker data * @restart.wk: restart worker * @restart.mode: reset/restart error mode information * @restart.during_reset: error occurred during previous software reset - * @me_recheck_wk: worker to recheck WiAMT/CSME presence - * @me_present: WiAMT/CSME is detected as present (1), not present (0) - * or unknown (-1, so can still use it as a boolean safely) * @trans_specific: data for the specific transport this is allocated for/with - * @dsbr_urm_fw_dependent: switch to URM based on fw settings - * @dsbr_urm_permanent: switch to URM permanently - * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used + * @request_top_reset: TOP reset was requested, used by the reset + * worker that should be scheduled (with appropriate reason) + * @do_top_reset: indication to the (PCIe) transport/context-info + * to do the TOP reset */ struct iwl_trans { bool csme_own; struct iwl_op_mode *op_mode; - const struct iwl_cfg_trans_params *trans_cfg; - const struct iwl_cfg *cfg; + const struct iwl_mac_cfg *mac_cfg; + const struct iwl_rf_cfg *cfg; struct iwl_drv *drv; + struct iwl_trans_config conf; enum iwl_trans_state state; unsigned long status; struct device *dev; - u32 max_skb_frags; - u32 hw_rev; - u32 hw_rev_step; - u32 hw_rf_id; - u32 hw_crf_id; - u32 hw_cnv_id; - u32 hw_wfpm_id; - u32 hw_id; - char hw_id_str[52]; - u32 sku_id[3]; - bool reduced_cap_sku; - u8 no_160:1, step_urm:1; - - u8 dsbr_urm_fw_dependent:1, - dsbr_urm_permanent:1; - bool ext_32khz_clock_valid; - - u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; + const struct iwl_trans_info info; + bool reduced_cap_sku; + bool step_urm; bool pm_support; bool ltr_enabled; @@ -928,16 +913,6 @@ struct iwl_trans { u8 reduce_power_loaded:1; u8 failed_to_load_reduce_power_image:1; - const struct iwl_hcmd_arr *command_groups; - int command_groups_size; - bool wide_cmd_header; - - wait_queue_head_t wait_command_queue; - u8 num_rx_queues; - - size_t iml_len; - u8 *iml; - /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; char dev_cmd_pool_name[50]; @@ -951,24 +926,14 @@ struct iwl_trans { struct iwl_trans_debug dbg; struct iwl_self_init_dram init_dram; - enum iwl_plat_pm_mode system_pm_mode; - - const char *name; - u32 mbx_addr_0_step; - u32 mbx_addr_1_step; - - u8 pcie_link_speed; - - struct iwl_dma_ptr invalid_tx_cmd; - struct { struct delayed_work wk; struct iwl_fw_error_dump_mode mode; bool during_reset; } restart; - struct delayed_work me_recheck_wk; - s8 me_present; + u8 request_top_reset:1, + do_top_reset:1; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -976,19 +941,18 @@ struct iwl_trans { }; const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); -int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); -void iwl_trans_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg); +void iwl_trans_op_mode_enter(struct iwl_trans *trans, + struct iwl_op_mode *op_mode); int iwl_trans_start_hw(struct iwl_trans *trans); void iwl_trans_op_mode_leave(struct iwl_trans 
*trans); -void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_trans_fw_alive(struct iwl_trans *trans); -int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, - bool run_in_rfkill); +int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw, + enum iwl_ucode_type ucode_type, bool run_in_rfkill); void iwl_trans_stop_device(struct iwl_trans *trans); @@ -1151,6 +1115,9 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans, { if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return; + /* clear this on device init, not cleared on any unbind/reprobe */ + if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status)) + return; trans->restart.mode.type = type; trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER; @@ -1188,6 +1155,9 @@ static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans, set_bit(STATUS_IN_SW_RESET, &trans->status); + if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT)) + return; + if (!trans->op_mode->ops->sw_reset || !trans->op_mode->ops->sw_reset(trans->op_mode, type)) clear_bit(STATUS_IN_SW_RESET, &trans->status); @@ -1235,7 +1205,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_cfg_trans_params *cfg_trans); + const struct iwl_mac_cfg *cfg_trans); int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); @@ -1246,6 +1216,19 @@ static inline bool iwl_trans_is_hw_error_value(u32 val) void iwl_trans_free_restart_list(void); +static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans) +{ + u16 result = trans->cfg->num_rbds; + + /* + * Since AX210 family (So/Ty) the device cannot put multiple + * frames into the same buffer, so double the value for them.
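 * For example (hypothetical value): a configuration with num_rbds = 2048 would make this helper return 4096 on AX210 or later and 2048 on earlier devices.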
+ */ + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return 2 * result; + return result; +} + /***************************************************** * PCIe handling *****************************************************/ @@ -1257,6 +1240,8 @@ enum iwl_reset_mode { /* upper level modes: */ IWL_RESET_MODE_SW_RESET, IWL_RESET_MODE_REPROBE, + /* TOP reset doesn't require PCIe remove */ + IWL_RESET_MODE_TOP_RESET, /* PCIE level modes: */ IWL_RESET_MODE_REMOVE_ONLY, IWL_RESET_MODE_RESCAN, @@ -1273,4 +1258,19 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); +/* Internal helper */ +static inline void iwl_trans_set_info(struct iwl_trans *trans, + struct iwl_trans_info *info) +{ + struct iwl_trans_info *write; + + write = (void *)(uintptr_t)&trans->info; + *write = *info; +} + +static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans) +{ + return u32_get_bits(trans->info.hw_id, GENMASK(31, 16)); +} + #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c index 687a9450ac98..bda488ae9eec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c @@ -124,10 +124,12 @@ void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld, rcu_read_lock(); baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); - if (IWL_FW_CHECK(mld, !baid_data, - "Got valid BAID %d but not allocated, invalid BAR release!\n", - baid)) + if (!baid_data) { + IWL_DEBUG_HT(mld, + "Got valid BAID %d but not allocated\n", + baid); goto out_unlock; + } if (IWL_FW_CHECK(mld, tid != baid_data->tid || sta_id > mld->fw->ucode_capa.num_stations || @@ -444,7 +446,7 @@ static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld, struct iwl_mld_baid_data *data, u16 ssn) { - for (int i = 0; i < mld->trans->num_rx_queues; i++) { + for (int i = 0; i < mld->trans->info.num_rxqs; i++) { struct iwl_mld_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mld_reorder_buf_entry *entries = @@ -468,7 +470,7 @@ static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld, iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA, &delba_data, sizeof(delba_data)); - for (int i = 0; i < mld->trans->num_rx_queues; i++) { + for (int i = 0; i < mld->trans->info.num_rxqs; i++) { struct iwl_mld_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mld_reorder_buf_entry *entries = @@ -530,7 +532,7 @@ int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta, * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + - mld->trans->num_rx_queues * reorder_buf_size, + mld->trans->info.num_rxqs * reorder_buf_size, GFP_KERNEL); if (!baid_data) return -ENOMEM; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c index 571eabd0b511..26511b49d89a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/ap.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c @@ -11,6 +11,7 @@ #include "tx.h" #include "power.h" #include "key.h" +#include "phy.h" #include "iwl-utils.h" #include "fw/api/sta.h" @@ -269,6 +270,7 @@ int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw, { struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *ctx; int ret; if (vif->type == NL80211_IFTYPE_AP) @@ -314,6 +316,13 @@ int iwl_mld_start_ap_ibss(struct 
ieee80211_hw *hw, return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif, FW_CTXT_ACTION_MODIFY); + /* When the channel context was added, the link is not yet active, so + * min_def is always used. Update the PHY again here in case def should + * actually be used. + */ + ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf); + iwl_mld_update_phy_chandef(mld, ctx); + return 0; rm_bcast: iwl_mld_remove_bcast_sta(mld, vif, link); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c index 5f262bd43f21..32c727b3b391 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c @@ -24,17 +24,13 @@ int iwl_mld_send_bt_init_conf(struct iwl_mld *mld) void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt) { - const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data; const struct iwl_bt_coex_profile_notif zero_notif = {}; /* zeroed structure means that BT is OFF */ bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif)); - if (bt_is_active == mld->bt_is_active) - return; - + mld->last_bt_notif = *notif; IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF"); - mld->bt_is_active = bt_is_active; - iwl_mld_emlsr_check_bt(mld); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c index dc736fdc176d..339b148d6793 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c @@ -774,7 +774,7 @@ iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld, return; for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { - for (int i = 1; i < mld->trans->num_rx_queues; i++) + for (int i = 1; i < mld->trans->info.num_rxqs; i++) memcpy(mld_ptk_pn->q[i].pn[tid], wowlan_status->ptk.aes_seq[tid].ccmp.pn, IEEE80211_CCMP_PN_LEN); @@ -1349,7 +1349,6 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld) } else { /* Async notification might send hcmds, which is not allowed in suspend */ iwl_mld_cancel_async_notifications(mld); - mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; mld->fw_status.in_d3 = true; } @@ -1374,7 +1373,6 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld) IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n"); - mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; mld->fw_status.in_d3 = false; iwl_fw_dbg_read_d3_debug_data(&mld->fwrt); @@ -1439,7 +1437,7 @@ iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta, ieee80211_get_key_rx_seq(key, tid, &seq); /* and use the internal data for all queues */ - for (int que = 1; que < mld->trans->num_rx_queues; que++) { + for (int que = 1; que < mld->trans->info.num_rxqs; que++) { u8 *cur_pn = mld_ptk_pn->q[que].pn[tid]; if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0) @@ -1905,7 +1903,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld) IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n"); - mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; if (!mld->fw_status.in_d3) { IWL_DEBUG_WOWLAN(mld, "Device_powered_off() was called during wowlan\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c index 93f9f78e4276..352da8aa7898 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c @@ -546,6 +546,9 @@ iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir) 
#endif MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200); + debugfs_create_bool("rx_ts_ptp", 0600, debugfs_dir, + &mld->monitor.ptp_time); + /* Create a symlink with mac80211. It will be removed when mac80211 * exits (before the opmode exits which removes the target.) */ @@ -997,8 +1000,8 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw, mld_link_dir = debugfs_create_dir("iwlmld", dir); } -static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, - size_t count, void *data) +static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, + size_t count, void *data, bool v3) { struct ieee80211_link_sta *link_sta = data; struct iwl_mld_link_sta *mld_link_sta; @@ -1020,6 +1023,10 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) return -EIO; + /* input is in FW format (v2 or v3) so convert to v3 */ + rate = iwl_v3_rate_from_v2_v3(cpu_to_le32(rate), v3); + rate = le32_to_cpu(iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3)); + ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE : IWL_TLC_DEBUG_FIXED_RATE, @@ -1033,6 +1040,18 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, return ret ? : count; } +static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, false); +} + +static ssize_t iwl_dbgfs_fixed_rate_v3_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, true); +} + static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf, size_t count, void *data) { @@ -1072,6 +1091,7 @@ static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf, LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64); LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64); +LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate_v3, 64); void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -1079,5 +1099,6 @@ void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw, struct dentry *dir) { LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200); + LINK_STA_DEBUGFS_ADD_FILE(fixed_rate_v3, dir, 0200); LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c index 6e9af37fb86d..73ed8d5cab43 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c @@ -8,6 +8,7 @@ #include "fw/api/alive.h" #include "fw/api/scan.h" #include "fw/api/rx.h" +#include "phy.h" #include "fw/dbg.h" #include "fw/pnvm.h" #include "hcmd.h" @@ -48,7 +49,7 @@ static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld) /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = - 1 + (i % (mld->trans->num_rx_queues - 1)); + 1 + (i % (mld->trans->info.num_rxqs - 1)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd); @@ -98,17 +99,23 @@ static void iwl_mld_alive_imr_data(struct iwl_trans *trans, } } +struct iwl_mld_alive_data { + __le32 sku_id[3]; + bool valid; +}; + static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); + unsigned int expected_sz; struct iwl_mld *mld = 
container_of(notif_wait, struct iwl_mld, notif_wait); struct iwl_trans *trans = mld->trans; u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP, UCODE_ALIVE_NTFY, 0); - struct iwl_alive_ntf_v6 *palive; - bool *alive_valid = data; + struct iwl_mld_alive_data *alive_data = data; + struct iwl_alive_ntf *palive; struct iwl_umac_alive *umac; struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; @@ -116,7 +123,19 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, u32 umac_error_table; u16 status; - if (version < 6 || version > 7 || pkt_len != sizeof(*palive)) + switch (version) { + case 6: + case 7: + expected_sz = sizeof(struct iwl_alive_ntf_v6); + break; + case 8: + expected_sz = sizeof(struct iwl_alive_ntf); + break; + default: + return false; + } + + if (pkt_len != expected_sz) return false; palive = (void *)pkt->data; @@ -128,12 +147,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); - trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); - trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); - trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); + BUILD_BUG_ON(sizeof(alive_data->sku_id) != + sizeof(palive->sku_id.data)); + memcpy(alive_data->sku_id, palive->sku_id.data, + sizeof(palive->sku_id.data)); IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", - trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]); + le32_to_cpu(alive_data->sku_id[0]), + le32_to_cpu(alive_data->sku_id[1]), + le32_to_cpu(alive_data->sku_id[2])); lmac_error_event_table = le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); @@ -146,13 +168,13 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & ~FW_ADDR_CACHE_CONTROL; - if (umac_error_table >= trans->cfg->min_umac_error_event_table) + if (umac_error_table >= trans->mac_cfg->base->min_umac_error_event_table) iwl_fw_umac_set_alive_err_table(trans, umac_error_table); else IWL_ERR(mld, "Not valid error log pointer 0x%08X\n", umac_error_table); - *alive_valid = status == IWL_ALIVE_STATUS_OK; + alive_data->valid = status == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mld, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", @@ -170,6 +192,10 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n", le16_to_cpu(palive->flags)); + if (version >= 8) + IWL_DEBUG_FW(mld, "platform_id 0x%llx\n", + le64_to_cpu(palive->platform_id)); + iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac); return true; @@ -207,24 +233,22 @@ static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld) pc_data->pc_address); } -static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld) +static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld, + struct iwl_mld_alive_data *alive_data) { - const struct fw_img *fw = - iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR); static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; struct iwl_notification_wait alive_wait; - bool alive_valid = false; int ret; lockdep_assert_wiphy(mld->wiphy); iwl_init_notification_wait(&mld->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd), - iwl_alive_fn, &alive_valid); + iwl_alive_fn, alive_data); iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); - ret = iwl_trans_start_fw(mld->trans, fw, true); + ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true); if (ret) { iwl_remove_notification(&mld->notif_wait, &alive_wait); return 
ret; @@ -241,12 +265,12 @@ static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld) return ret; } - if (!alive_valid) { + if (!alive_data->valid) { IWL_ERR(mld, "Loaded firmware is not valid!\n"); return -EIO; } - iwl_trans_fw_alive(mld->trans, 0); + iwl_trans_fw_alive(mld->trans); return 0; } @@ -254,7 +278,10 @@ static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld) static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld) { struct iwl_notification_wait init_wait; - struct iwl_init_extended_cfg_cmd init_cfg = {}; + struct iwl_init_extended_cfg_cmd init_cfg = { + .init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)), + }; + struct iwl_mld_alive_data alive_data = {}; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; @@ -262,12 +289,12 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld) lockdep_assert_wiphy(mld->wiphy); - ret = iwl_mld_load_fw_wait_alive(mld); + ret = iwl_mld_load_fw_wait_alive(mld, &alive_data); if (ret) return ret; ret = iwl_pnvm_load(mld->trans, &mld->notif_wait, - &mld->fw->ucode_capa); + &mld->fw->ucode_capa, alive_data.sku_id); if (ret) { IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret); return ret; @@ -291,6 +318,13 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld) return ret; } + ret = iwl_mld_send_phy_cfg_cmd(mld); + if (ret) { + IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret); + iwl_remove_notification(&mld->notif_wait, &init_wait); + return ret; + } + ret = iwl_wait_notification(&mld->notif_wait, &init_wait, MLD_INIT_COMPLETE_TIMEOUT); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c index 47b5b31b5b91..235b55e0fe59 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c @@ -22,9 +22,17 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif) struct iwl_mld *mld = mld_vif->mld; struct iwl_mld_link *link; + mld_vif->emlsr.blocked_reasons &= ~IWL_MLD_EMLSR_BLOCKED_ROC; + + if (mld_vif->aux_sta.sta_id != IWL_INVALID_STA) + iwl_mld_free_internal_sta(mld, &mld_vif->aux_sta); + /* EMLSR is turned back on during recovery */ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; + if (mld_vif->roc_activity != ROC_NUM_ACTIVITIES) + ieee80211_remain_on_channel_expired(mld->hw); + mld_vif->roc_activity = ROC_NUM_ACTIVITIES; for_each_mld_vif_valid_link(mld_vif, link) { @@ -103,6 +111,24 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld, IEEE80211_HE_MAC_CAP2_ACK_EN); } +static void iwl_mld_set_he_support(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd, + int cmd_ver) +{ + if (vif->type == NL80211_IFTYPE_AP) { + if (cmd_ver == 2) + cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1); + else + cmd->wifi_gen.he_ap_support = 1; + } else { + if (cmd_ver == 2) + cmd->wifi_gen_v2.he_support = cpu_to_le16(1); + else + cmd->wifi_gen.he_support = 1; + } +} + /* fill the common part for all interface types */ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld, struct ieee80211_vif *vif, @@ -112,6 +138,9 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld, struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); struct ieee80211_bss_conf *link_conf; unsigned int link_id; + int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, + WIDE_ID(MAC_CONF_GROUP, + MAC_CONFIG_CMD), 0); lockdep_assert_wiphy(mld->wiphy); @@ -138,12 +167,11 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld, * and enable both when we have MLO. 
*/ if (ieee80211_vif_is_mld(vif)) { - if (vif->type == NL80211_IFTYPE_AP) - cmd->he_ap_support = cpu_to_le16(1); + iwl_mld_set_he_support(mld, vif, cmd, cmd_ver); + if (cmd_ver == 2) + cmd->wifi_gen_v2.eht_support = cpu_to_le32(1); else - cmd->he_support = cpu_to_le16(1); - - cmd->eht_support = cpu_to_le32(1); + cmd->wifi_gen.eht_support = 1; return; } @@ -151,10 +179,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld, if (!link_conf->he_support) continue; - if (vif->type == NL80211_IFTYPE_AP) - cmd->he_ap_support = cpu_to_le16(1); - else - cmd->he_support = cpu_to_le16(1); + iwl_mld_set_he_support(mld, vif, cmd, cmd_ver); /* EHT, if supported, was already set above */ break; @@ -388,6 +413,7 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif) wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk, iwl_mld_emlsr_tmp_non_bss_done_wk); } + iwl_mld_init_internal_sta(&mld_vif->aux_sta); return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h index 1ae522431f3f..49e2ce65557d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h @@ -10,6 +10,7 @@ #include "link.h" #include "session-protect.h" #include "d3.h" +#include "fw/api/time-event.h" enum iwl_mld_cca_40mhz_wa_status { CCA_40_MHZ_WA_NONE, @@ -125,8 +126,6 @@ struct iwl_mld_emlsr { * Only valid for STA. (FIXME: needs to be per link) * @num_associated_stas: number of associated STAs. Relevant only for AP mode. * @ap_ibss_active: whether the AP/IBSS was started - * @roc_activity: the id of the roc_activity running. Relevant for p2p device - * only. Set to %ROC_NUM_ACTIVITIES when not in use. * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the * environment is too loaded, we work around this by reconnecting to the * same AP with 20 MHz. This manages the status of the workaround. @@ -142,6 +141,9 @@ struct iwl_mld_emlsr { * @use_ps_poll: use ps_poll frames * @disable_bf: disable beacon filter * @dbgfs_slink: debugfs symlink for this interface + * @roc_activity: the id of the roc_activity running. Relevant for STA and + * p2p device only. Set to %ROC_NUM_ACTIVITIES when not in use. + * @aux_sta: station used for remain on channel. Used in P2P device. 
*/ struct iwl_mld_vif { /* Add here fields that need clean up on restart */ @@ -153,7 +155,6 @@ struct iwl_mld_vif { struct ieee80211_key_conf __rcu *bigtks[2]; u8 num_associated_stas; bool ap_ibss_active; - u32 roc_activity; enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround; #ifdef CONFIG_IWLWIFI_DEBUGFS bool beacon_inject_active; @@ -176,6 +177,8 @@ struct iwl_mld_vif { bool disable_bf; struct dentry *dbgfs_slink; #endif + enum iwl_roc_activity roc_activity; + struct iwl_mld_int_sta aux_sta; }; static inline struct iwl_mld_vif * diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c index 82a4979a3af3..d0f56189ad3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c @@ -580,7 +580,7 @@ iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld, *link_sta = NULL; - if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC) + if (mld->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) return NULL; vif = iwl_mld_get_bss_vif(mld); @@ -782,10 +782,11 @@ iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link, iwl_mld_init_internal_sta(&mld_link->bcast_sta); iwl_mld_init_internal_sta(&mld_link->mcast_sta); - iwl_mld_init_internal_sta(&mld_link->aux_sta); + iwl_mld_init_internal_sta(&mld_link->mon_sta); - wiphy_delayed_work_init(&mld_link->rx_omi.finished_work, - iwl_mld_omi_bw_finished_work); + if (!mld->fw_status.in_hw_restart) + wiphy_delayed_work_init(&mld_link->rx_omi.finished_work, + iwl_mld_omi_bw_finished_work); return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h index 42b7bdcbd741..39f04aae5579 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/link.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h @@ -39,7 +39,7 @@ struct iwl_probe_resp_data { * @vif: the vif this link belongs to * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS. * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS. - * @aux_sta: station used for remain on channel. Used in P2P device. + * @mon_sta: station used for TX injection in monitor interface. 
* @link_id: over the air link ID * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs, * but higher layers work differently, so we store the keys here for @@ -72,7 +72,7 @@ struct iwl_mld_link { struct ieee80211_vif *vif; struct iwl_mld_int_sta bcast_sta; struct iwl_mld_int_sta mcast_sta; - struct iwl_mld_int_sta aux_sta; + struct iwl_mld_int_sta mon_sta; u8 link_id; struct { @@ -89,7 +89,7 @@ struct iwl_mld_link { struct iwl_probe_resp_data __rcu *probe_resp_data; }; -/* Cleanup function for struct iwl_mld_phy, will be called in restart */ +/* Cleanup function for struct iwl_mld_link, will be called in restart */ static inline void iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link) { @@ -105,8 +105,8 @@ iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link) iwl_mld_free_internal_sta(mld, &link->bcast_sta); if (link->mcast_sta.sta_id != IWL_INVALID_STA) iwl_mld_free_internal_sta(mld, &link->mcast_sta); - if (link->aux_sta.sta_id != IWL_INVALID_STA) - iwl_mld_free_internal_sta(mld, &link->aux_sta); + if (link->mon_sta.sta_id != IWL_INVALID_STA) + iwl_mld_free_internal_sta(mld, &link->mon_sta); } /* Convert a percentage from [0,100] to [0,255] */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c index a4a612afb3b3..f7faa87b8ba6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c @@ -21,7 +21,7 @@ static bool iwl_mld_calc_low_latency(struct iwl_mld *mld, { struct iwl_mld_low_latency *ll = &mld->low_latency; bool global_low_latency = false; - u8 num_rx_q = mld->trans->num_rx_queues; + u8 num_rx_q = mld->trans->info.num_rxqs; for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) { u32 total_vo_vi_pkts = 0; @@ -131,12 +131,12 @@ int iwl_mld_low_latency_init(struct iwl_mld *mld) struct iwl_mld_low_latency *ll = &mld->low_latency; unsigned long ts = jiffies; - ll->pkts_counters = kcalloc(mld->trans->num_rx_queues, + ll->pkts_counters = kcalloc(mld->trans->info.num_rxqs, sizeof(*ll->pkts_counters), GFP_KERNEL); if (!ll->pkts_counters) return -ENOMEM; - for (int q = 0; q < mld->trans->num_rx_queues; q++) + for (int q = 0; q < mld->trans->info.num_rxqs; q++) spin_lock_init(&ll->pkts_counters[q].lock); wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk); @@ -167,7 +167,7 @@ void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld) memset(ll->window_start, 0, sizeof(ll->window_start)); memset(ll->result, 0, sizeof(ll->result)); - for (int q = 0; q < mld->trans->num_rx_queues; q++) + for (int q = 0; q < mld->trans->info.num_rxqs; q++) memset(ll->pkts_counters[q].vo_vi, 0, sizeof(ll->pkts_counters[q].vo_vi)); } @@ -276,7 +276,7 @@ void iwl_mld_low_latency_update_counters(struct iwl_mld *mld, return; if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) || - queue >= mld->trans->num_rx_queues)) + queue >= mld->trans->info.num_rxqs)) return; if (mld->low_latency.stopped) @@ -324,7 +324,7 @@ void iwl_mld_low_latency_restart(struct iwl_mld *mld) ll->window_start[mac] = 0; low_latency |= ll->result[mac]; - for (int q = 0; q < mld->trans->num_rx_queues; q++) { + for (int q = 0; q < mld->trans->info.num_rxqs; q++) { spin_lock_bh(&ll->pkts_counters[q].lock); ll->pkts_counters[q].vo_vi[mac] = 0; spin_unlock_bh(&ll->pkts_counters[q].lock); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c index 1ee5ac9a37ec..4ba050397632 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -243,7 +243,6 @@ static void iwl_mac_hw_set_flags(struct iwl_mld *mld) ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, TDLS_WIDER_BW); @@ -305,7 +304,7 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld) wiphy->max_remain_on_channel_duration = 10000; - wiphy->hw_version = mld->trans->hw_id; + wiphy->hw_version = mld->trans->info.hw_id; wiphy->hw_timestamp_max_peers = 1; @@ -351,9 +350,9 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld) hw->queues = IEEE80211_NUM_ACS; hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - hw->netdev_features |= mld->cfg->features; + hw->netdev_features |= mld->trans->mac_cfg->base->features; - hw->max_tx_fragments = mld->trans->max_skb_frags; + hw->max_tx_fragments = mld->trans->info.max_skb_frags; hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL; hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; @@ -376,6 +375,24 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld) static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld) { + int ratecheck; + + /* check for rates version 3 */ + ratecheck = + (iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11) + + (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) >= 4) + + (iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP, + REPLY_RX_MPDU_CMD, 0) >= 6) + + (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP, + RX_NO_DATA_NOTIF, 0) >= 4) + + (iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP, TX_CMD, 0) >= 9); + + if (ratecheck != 0 && ratecheck != 5) { + IWL_ERR(mld, "Firmware has inconsistent rates\n"); + return -EINVAL; + } + /* 11ax is expected to be enabled for all supported devices */ if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable)) return -EINVAL; @@ -888,9 +905,8 @@ void iwl_mld_change_chanctx(struct ieee80211_hw *hw, return; } update: - phy->chandef = *chandef; - iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY); + iwl_mld_update_phy_chandef(mld, ctx); } static u8 @@ -1022,12 +1038,19 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw, iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link); if (vif->type == NL80211_IFTYPE_MONITOR) { - /* TODO: task=sniffer add sniffer station */ + ret = iwl_mld_add_mon_sta(mld, vif, link); + if (ret) + goto deactivate_link; + mld->monitor.p80 = iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper); } return 0; + +deactivate_link: + if (mld_link->active) + iwl_mld_deactivate_link(mld, link); err: RCU_INIT_POINTER(mld_link->chan_ctx, NULL); return ret; @@ -1053,7 +1076,8 @@ void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, iwl_mld_deactivate_link(mld, link); - /* TODO: task=sniffer remove sniffer station */ + if (vif->type == NL80211_IFTYPE_MONITOR) + iwl_mld_remove_mon_sta(mld, vif, link); if (n_active > 1) { /* Indicate to mac80211 that EML is disabled */ @@ -1249,9 +1273,14 @@ iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw, } static void -iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable) +iwl_mld_smps_workaround(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable) { struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + bool workaround_required = + iwl_fw_lookup_cmd_ver(mld->fw, 
MAC_PM_POWER_TABLE, 0) < 2; + + if (!workaround_required) + return; /* Send the device-level power commands since the * firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to @@ -1298,7 +1327,7 @@ void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw, } if (changes & BSS_CHANGED_PS) { - iwl_mld_smps_wa(mld, vif, vif->cfg.ps); + iwl_mld_smps_workaround(mld, vif, vif->cfg.ps); iwl_mld_update_mac_power(mld, vif, false); } @@ -1311,13 +1340,22 @@ iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int ret; if (WARN_ON(!hw_req->req.n_channels || hw_req->req.n_channels > mld->fw->ucode_capa.n_scan_channels)) return -EINVAL; - return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies); + ret = iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies); + if (!ret) { + /* We will be busy with scanning, so the counters may not reflect the + * reality. Stop checking the counters until the scan ends + */ + iwl_mld_start_ignoring_tpt_updates(mld); + } + + return ret; } static void @@ -1333,8 +1371,11 @@ iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw, * cancel scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. */ - if (mld->scan.status & IWL_MLD_SCAN_REGULAR) + if (mld->scan.status & IWL_MLD_SCAN_REGULAR) { iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true); + /* Scan is over, we can check again the tpt counters */ + iwl_mld_stop_ignoring_tpt_updates(mld); + } } static int @@ -1711,7 +1752,7 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld, FW_CTXT_ACTION_MODIFY); if (ret) return ret; - iwl_mld_smps_wa(mld, vif, vif->cfg.ps); + iwl_mld_smps_workaround(mld, vif, vif->cfg.ps); } /* MFP is set by default before the station is authorized. 
@@ -1754,7 +1795,7 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld, &mld_vif->emlsr.check_tpt_wk); iwl_mld_reset_cca_40mhz_workaround(mld, vif); - iwl_mld_smps_wa(mld, vif, true); + iwl_mld_smps_workaround(mld, vif, true); } /* once we move into assoc state, need to update the FW to @@ -1995,7 +2036,7 @@ static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld, struct ieee80211_key_conf *key, struct iwl_mld_ptk_pn **ptk_pn) { - u8 num_rx_queues = mld->trans->num_rx_queues; + u8 num_rx_queues = mld->trans->info.num_rxqs; int keyidx = key->keyidx; struct ieee80211_key_seq seq; @@ -2451,15 +2492,17 @@ iwl_mld_change_vif_links(struct ieee80211_hw *hw, added |= BIT(0); for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - if (removed & BIT(i)) + if (removed & BIT(i) && !WARN_ON(!old[i])) iwl_mld_remove_link(mld, old[i]); } for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { if (added & BIT(i)) { link_conf = link_conf_dereference_protected(vif, i); - if (WARN_ON(!link_conf)) - return -EINVAL; + if (!link_conf) { + err = -EINVAL; + goto remove_added_links; + } err = iwl_mld_add_link(mld, link_conf); if (err) @@ -2494,7 +2537,11 @@ remove_added_links: iwl_mld_remove_link(mld, link_conf); } - return err; + if (WARN_ON(!iwl_mld_error_before_recovery(mld))) + return err; + + /* reconfig will fix us anyway */ + return 0; } static int iwl_mld_change_sta_links(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c index daca14e208bd..19cb562e7a73 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation */ #include <net/cfg80211.h> @@ -158,7 +158,7 @@ iwl_mld_get_regdomain(struct iwl_mld *mld, } IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver); - regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg, + regd = iwl_parse_nvm_mcc_info(mld->trans, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c index 87624730fb50..8cdd960c5245 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -62,7 +62,7 @@ static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld) VISIBLE_IF_IWLWIFI_KUNIT void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, - const struct iwl_cfg *cfg, const struct iwl_fw *fw, + const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct ieee80211_hw *hw, struct dentry *dbgfs_dir) { mld->dev = trans->dev; @@ -193,6 +193,7 @@ static const struct iwl_hcmd_names iwl_mld_system_names[] = { HCMD_NAME(SOC_CONFIGURATION_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), + HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_STATISTICS_CMD), HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), @@ -325,33 +326,40 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size); static void iwl_mld_configure_trans(struct iwl_op_mode *op_mode) { - const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); static const u8 no_reclaim_cmds[] = {TX_CMD}; - struct iwl_trans_config trans_cfg = { - .op_mode = op_mode, - /* Rx is not supported yet, but add it to avoid warnings */ - .rx_buf_size = 
iwl_amsdu_size_to_rxb_size(), - .command_groups = iwl_mld_groups, - .command_groups_size = ARRAY_SIZE(iwl_mld_groups), - .fw_reset_handshake = true, - .queue_alloc_cmd_ver = - iwl_fw_lookup_cmd_ver(mld->fw, - WIDE_ID(DATA_PATH_GROUP, - SCD_QUEUE_CONFIG_CMD), - 0), - .no_reclaim_cmds = no_reclaim_cmds, - .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds), - .cb_data_offs = offsetof(struct ieee80211_tx_info, - driver_data[2]), - }; struct iwl_trans *trans = mld->trans; + u32 eckv_value; - trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->iml = mld->fw->iml; - trans->iml_len = mld->fw->iml_len; - trans->wide_cmd_header = true; + iwl_bios_setup_step(trans, &mld->fwrt); + iwl_uefi_get_step_table(trans); - iwl_trans_configure(trans, &trans_cfg); + if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value)) + IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n"); + else + trans->conf.ext_32khz_clock_valid = !!eckv_value; + + trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size(); + trans->conf.command_groups = iwl_mld_groups; + trans->conf.command_groups_size = ARRAY_SIZE(iwl_mld_groups); + trans->conf.fw_reset_handshake = true; + trans->conf.queue_alloc_cmd_ver = + iwl_fw_lookup_cmd_ver(mld->fw, WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD), + 0); + trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info, + driver_data[2]); + BUILD_BUG_ON(sizeof(no_reclaim_cmds) > + sizeof(trans->conf.no_reclaim_cmds)); + memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds, + sizeof(no_reclaim_cmds)); + trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + + trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); + trans->conf.wide_cmd_header = true; + + iwl_trans_op_mode_enter(trans, op_mode); } /* @@ -362,13 +370,12 @@ iwl_mld_configure_trans(struct iwl_op_mode *op_mode) #define NUM_FW_LOAD_RETRIES 3 static struct iwl_op_mode * -iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; struct iwl_mld *mld; - u32 eckv_value; int ret; /* Allocate and initialize a new hardware device */ @@ -386,16 +393,13 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir); + /* we'll verify later it matches between commands */ + mld->fw_rates_ver_3 = iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11; + iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir); iwl_mld_get_bios_tables(mld); iwl_uefi_get_sgom_table(trans, &mld->fwrt); - iwl_uefi_get_step_table(trans); - if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value)) - IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n"); - else - trans->ext_32khz_clock_valid = !!eckv_value; - iwl_bios_setup_step(trans, &mld->fwrt); mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt); iwl_mld_hw_set_regulatory(mld); @@ -485,8 +489,9 @@ iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode) iwl_mld_ptp_remove(mld); iwl_mld_leds_exit(mld); - wiphy_lock(mld->wiphy); iwl_mld_thermal_exit(mld); + + wiphy_lock(mld->wiphy); iwl_mld_low_latency_stop(mld); iwl_mld_deinit_time_sync(mld); wiphy_unlock(mld->wiphy); @@ -674,6 +679,13 @@ static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode, { struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + /* SW reset can happen for TOP error w/o NIC error, so + * also abort scan here and set in_hw_restart, when 
we + * had a NIC error both were already done. + */ + iwl_mld_report_scan_aborted(mld); + mld->fw_status.in_hw_restart = true; + /* Do restart only in the following conditions are met: * - we consider the FW as running * - The trigger that brought us here is defined as one that requires @@ -702,7 +714,6 @@ static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode) struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); wiphy_lock(mld->wiphy); - mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_mld_stop_fw(mld); mld->fw_status.in_d3 = false; wiphy_unlock(mld->wiphy); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h index a4a16da6ebf3..1a2c44f44eff 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h @@ -116,6 +116,7 @@ * @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU * @monitor.cur_aid: current association id tracked by the sniffer * @monitor.cur_bssid: current bssid tracked by the sniffer + * @monitor.ptp_time: set the Rx mactime using the device's PTP clock time * @monitor.p80: primary channel position relative to he whole bandwidth, in * steps of 80 MHz * @fw_id_to_link_sta: maps a fw id of a sta to the corresponding @@ -126,7 +127,6 @@ * cleanup using iwl_mld_free_internal_sta * @netdetect: indicates the FW is in suspend mode with netdetect configured * @p2p_device_vif: points to the p2p device vif if exists - * @bt_is_active: indicates that BT is active * @dev: pointer to device struct. For printing purposes * @trans: pointer to the transport layer * @cfg: pointer to the device configuration @@ -154,6 +154,8 @@ * @radio_kill: bitmap of radio kill status * @radio_kill.hw: radio is killed by hw switch * @radio_kill.ct: radio is killed because the device it too hot + * @power_budget_mw: maximum cTDP power budget as defined for this system and + * device * @addresses: device MAC addresses. * @scan: instance of the scan object * @wowlan: WoWLAN support data. @@ -174,6 +176,7 @@ * @mcast_filter_cmd: pointer to the multicast filter command. * @mgmt_tx_ant: stores the last TX antenna index; used for setting * TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure). + * @fw_rates_ver_3: FW rates are in version 3 * @low_latency: low-latency manager. * @tzone: thermal zone device's data * @cooling_dev: cooling device's related data @@ -184,6 +187,7 @@ * @ptp_data: data of the PTP clock * @time_sync: time sync data. 
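The @last_bt_notif field added just below keeps the firmware's wifi_loss tables, which iwl_mld_bt_allows_emlsr() (introduced later in this patch) compares against IWL_MLD_BT_COEX_WIFI_LOSS_THRESH. A worked illustration with made-up numbers follows; the wrapper name and the concrete values are hypothetical.

static bool example_bt_allows_emlsr(void)
{
        /* a link RSSI of, say, -60 dBm is above the -63 dBm entry threshold,
         * so the mid/high-RSSI wifi_loss table is consulted; assume its larger
         * 2.4 GHz entry is 4 */
        int bt_penalty = 4;

        return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH; /* 4 < 7: allowed */
}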
* @ftm_initiator: FTM initiator data + * @last_bt_notif: last received BT Coex notif */ struct iwl_mld { /* Add here fields that need clean up on restart */ @@ -201,19 +205,20 @@ struct iwl_mld { #ifdef CONFIG_IWLWIFI_DEBUGFS __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; + bool ptp_time; #endif } monitor; #ifdef CONFIG_PM_SLEEP bool netdetect; #endif /* CONFIG_PM_SLEEP */ struct ieee80211_vif *p2p_device_vif; - bool bt_is_active; + struct iwl_bt_coex_profile_notif last_bt_notif; ); struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX]; /* And here fields that survive a fw restart */ struct device *dev; struct iwl_trans *trans; - const struct iwl_cfg *cfg; + const struct iwl_rf_cfg *cfg; const struct iwl_fw *fw; struct ieee80211_hw *hw; struct wiphy *wiphy; @@ -241,6 +246,8 @@ struct iwl_mld { ct:1; } radio_kill; + u32 power_budget_mw; + struct mac_address addresses[IWL_MLD_MAX_ADDRESSES]; struct iwl_mld_scan scan; #ifdef CONFIG_PM_SLEEP @@ -266,6 +273,8 @@ struct iwl_mld { u8 mgmt_tx_ant; + bool fw_rates_ver_3; + struct iwl_mld_low_latency low_latency; bool ibss_manager; @@ -286,7 +295,7 @@ struct iwl_mld { memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \ sizeof((_ptr)->zeroed_on_hw_restart)) -/* Cleanup function for struct iwl_mld_vif, will be called in restart */ +/* Cleanup function for struct iwl_mld, will be called in restart */ static inline void iwl_cleanup_mld(struct iwl_mld *mld) { @@ -410,7 +419,7 @@ iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; bool is_lb = band == NL80211_BAND_2GHZ; - if (format == RATE_MCS_LEGACY_OFDM_MSK) + if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate; /* CCK is not allowed in 5 GHz */ @@ -482,7 +491,7 @@ iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta, struct ieee80211_rx_status *rx_status, int queue); void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, - const struct iwl_cfg *cfg, const struct iwl_fw *fw, + const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct ieee80211_hw *hw, struct dentry *dbgfs_dir); #endif diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c index da16fff1ce86..dba5379ed009 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c @@ -326,23 +326,44 @@ static void iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); - struct iwl_esr_mode_notif *notif = (void *)data; + const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + enum iwl_mvm_fw_esr_recommendation action; + const struct iwl_esr_mode_notif *notif = NULL; + + if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP, + ESR_MODE_NOTIF, 0) > 1) { + notif = (void *)data; + action = le32_to_cpu(notif->action); + } else { + const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data; + + action = le32_to_cpu(notif_v1->action); + } if (!iwl_mld_vif_has_emlsr_cap(vif)) return; - switch (le32_to_cpu(notif->action)) { + switch (action) { case ESR_RECOMMEND_LEAVE: + if (notif) + IWL_DEBUG_INFO(mld_vif->mld, + "FW recommend leave reason = 0x%x\n", + le32_to_cpu(notif->leave_reason_mask)); + iwl_mld_exit_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_EXIT_FW_REQUEST, iwl_mld_get_primary_link(vif)); break; - case ESR_RECOMMEND_ENTER: case ESR_FORCE_LEAVE: + if (notif) + IWL_DEBUG_INFO(mld_vif->mld, + "FW force leave reason = 
0x%x\n", + le32_to_cpu(notif->leave_reason_mask)); + fallthrough; + case ESR_RECOMMEND_ENTER: default: IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n", - le32_to_cpu(notif->action)); + action); } } @@ -524,7 +545,7 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) } /* Sum up RX and TX MPDUs from the different queues/links */ - for (int q = 0; q < mld->trans->num_rx_queues; q++) { + for (int q = 0; q < mld->trans->info.num_rxqs; q++) { struct iwl_mld_per_q_mpdu_counter *queue_counter = &mld_sta->mpdu_counters[q]; @@ -636,6 +657,42 @@ s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld, #undef RSSI_THRESHOLD } +#define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH -69 +#define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH -63 +#define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH 7 + +VISIBLE_IF_IWLWIFI_KUNIT +bool +iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link, + bool check_entry) +{ + int bt_penalty, rssi_thresh; + s32 link_rssi; + + if (WARN_ON_ONCE(!link->bss)) + return false; + + link_rssi = MBM_TO_DBM(link->bss->signal); + rssi_thresh = check_entry ? + IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH : + IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH; + /* No valid RSSI - force to take low rssi */ + if (!link_rssi) + link_rssi = rssi_thresh - 1; + + if (link_rssi > rssi_thresh) + bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0], + mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]); + else + bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0], + mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]); + + IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n", + link->link_id, bt_penalty); + return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr); + static u32 iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld, struct ieee80211_vif *vif, @@ -650,7 +707,8 @@ iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld, if (WARN_ON_ONCE(!conf)) return IWL_MLD_EMLSR_EXIT_INVALID; - if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active) + if (link->chandef->chan->band == NL80211_BAND_2GHZ && + !iwl_mld_bt_allows_emlsr(mld, conf, true)) ret |= IWL_MLD_EMLSR_EXIT_BT_COEX; if (link->signal < @@ -732,7 +790,7 @@ iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx) return 10; } -VISIBLE_IF_IWLWIFI_KUNIT bool +static bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, const struct iwl_mld_link_sel_data *a, @@ -789,10 +847,9 @@ iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, return false; } -EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr); -static bool -iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif, +VISIBLE_IF_IWLWIFI_KUNIT u32 +iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif, struct iwl_mld_link_sel_data *a, struct iwl_mld_link_sel_data *b) { @@ -801,9 +858,13 @@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif, u32 reason_mask = 0; /* Per-link considerations */ - if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) || - iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false)) - return false; + reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true); + if (reason_mask) + return reason_mask; + + reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false); + if (reason_mask) + return reason_mask; if (a->chandef->chan->band == b->chandef->chan->band) { const struct cfg80211_chan_def *c_low = a->chandef; @@ -839,11 +900,11 
@@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif, nl80211_chan_width_to_mhz(a->chandef->width), nl80211_chan_width_to_mhz(b->chandef->width)); iwl_mld_print_emlsr_exit(mld, reason_mask); - return false; } - return true; + return reason_mask; } +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state); /* Calculation is done with fixed-point with a scaling factor of 1/256 */ #define SCALE_FACTOR 256 @@ -871,7 +932,7 @@ unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld, *primary_id = a->link_id; - if (!iwl_mld_valid_emlsr_pair(vif, a, b)) + if (iwl_mld_emlsr_pair_state(vif, a, b)) return 0; primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]); @@ -913,8 +974,11 @@ static void _iwl_mld_select_links(struct iwl_mld *mld, n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links, &best_idx); - if (WARN(!n_data, "Couldn't find a valid grade for any link!\n")) + if (!n_data) { + IWL_DEBUG_EHT(mld, + "Couldn't find a valid grade for any link!\n"); return; + } /* Default to selecting the single best link */ best_link = &data[best_idx]; @@ -982,27 +1046,41 @@ static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + const struct iwl_bt_coex_profile_notif zero_notif = {}; struct iwl_mld *mld = mld_vif->mld; struct ieee80211_bss_conf *link; unsigned int link_id; + const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif; - if (!mld->bt_is_active) { - iwl_mld_retry_emlsr(mld, vif); + if (!iwl_mld_vif_has_emlsr_cap(vif)) return; - } - /* BT is turned ON but we are not in EMLSR, nothing to do */ - if (!iwl_mld_emlsr_active(vif)) + /* zeroed structure means that BT is OFF */ + if (!memcmp(notif, &zero_notif, sizeof(*notif))) { + iwl_mld_retry_emlsr(mld, vif); return; - - /* In EMLSR and BT is turned ON */ + } for_each_vif_active_link(vif, link, link_id) { + bool emlsr_active, emlsr_allowed; + if (WARN_ON(!link->chanreq.oper.chan)) continue; - if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) { - iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX, + if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ) + continue; + + emlsr_active = iwl_mld_emlsr_active(vif); + emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link, + !emlsr_active); + if (emlsr_allowed && !emlsr_active) { + iwl_mld_retry_emlsr(mld, vif); + return; + } + + if (!emlsr_allowed && emlsr_active) { + iwl_mld_exit_emlsr(mld, vif, + IWL_MLD_EMLSR_EXIT_BT_COEX, iwl_mld_get_primary_link(vif)); return; } @@ -1095,3 +1173,69 @@ void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif) iwl_mld_int_mlo_scan(mld, vif); } + +static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = mld_vif->mld; + struct iwl_mld_sta *mld_sta; + bool *start = (void *)data; + + /* check_tpt_wk is only used when TPT block isn't set */ + if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT || + !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta) + return; + + mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta); + + /* We only count for the AP sta in a MLO connection */ + if (!mld_sta->mpdu_counters) + return; + + if (*start) { + wiphy_delayed_work_cancel(mld_vif->mld->wiphy, + &mld_vif->emlsr.check_tpt_wk); + IWL_DEBUG_EHT(mld, "TPT check disabled\n"); + return; + } + + /* Clear the counters so we start from the beginning */ + for (int q = 0; q < mld->trans->info.num_rxqs; q++) { + struct 
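iwl_mld_emlsr_pair_state() now returns a bitmask of IWL_MLD_EMLSR_EXIT_* reasons instead of a bare bool, so callers and the KUnit tests can see exactly why a link pair was rejected; zero still means the pair is usable. A simplified sketch of that convention follows; the bit values and thresholds are illustrative, not the driver's enum.

#include <stdint.h>
#include <stdio.h>

#define EMLSR_EXIT_BT_COEX     0x1
#define EMLSR_EXIT_LOW_RSSI    0x2
#define EMLSR_EXIT_EQUAL_BAND  0x4
#define EMLSR_EXIT_CHAN_LOAD   0x8

struct link_state {
        int band;        /* 0 = 2.4 GHz, 1 = 5 GHz, 2 = 6 GHz */
        int rssi_dbm;
        int chan_load;   /* percent, not caused by us */
};

static uint32_t emlsr_pair_state(const struct link_state *a,
                                 const struct link_state *b)
{
        uint32_t reasons = 0;

        if (a->band == b->band)
                reasons |= EMLSR_EXIT_EQUAL_BAND;
        if (a->rssi_dbm < -72 || b->rssi_dbm < -72)
                reasons |= EMLSR_EXIT_LOW_RSSI;
        if (a->chan_load < 10)
                reasons |= EMLSR_EXIT_CHAN_LOAD;

        return reasons;
}

int main(void)
{
        struct link_state a = { .band = 1, .rssi_dbm = -60, .chan_load = 5 };
        struct link_state b = { .band = 1, .rssi_dbm = -80, .chan_load = 30 };
        uint32_t why = emlsr_pair_state(&a, &b);

        if (why)
                printf("EMLSR blocked, reason mask 0x%x\n", why);
        else
                printf("EMLSR allowed\n");
        return 0;
}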
iwl_mld_per_q_mpdu_counter *queue_counter = + &mld_sta->mpdu_counters[q]; + + spin_lock_bh(&queue_counter->lock); + + memset(queue_counter->per_link, 0, + sizeof(queue_counter->per_link)); + + spin_unlock_bh(&queue_counter->lock); + } + + /* Schedule the check in 5 seconds */ + wiphy_delayed_work_queue(mld_vif->mld->wiphy, + &mld_vif->emlsr.check_tpt_wk, + round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW)); + IWL_DEBUG_EHT(mld, "TPT check enabled\n"); +} + +void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld) +{ + bool start = true; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_ignore_tpt_iter, + &start); +} + +void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld) +{ + bool start = false; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_ignore_tpt_iter, + &start); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h index 4fb1fdbe3df9..9afa3d6ea649 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h @@ -37,7 +37,7 @@ static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif) return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION && ieee80211_vif_is_mld(vif) && vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP && - !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id); + !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->info.hw_rf_id); } static inline int @@ -158,10 +158,16 @@ struct iwl_mld_link_sel_data { }; #if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) -bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, - struct ieee80211_vif *vif, - const struct iwl_mld_link_sel_data *a, - const struct iwl_mld_link_sel_data *b); +u32 iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif, + struct iwl_mld_link_sel_data *a, + struct iwl_mld_link_sel_data *b); + +bool iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + bool entry_criteria); #endif +void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld); +void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld); + #endif /* __iwl_mld_mlo_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c index 10f1bee89205..c0e62d46aba6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c @@ -304,12 +304,15 @@ CMD_VERSIONS(session_prot_notif, CMD_VERSIONS(missed_beacon_notif, CMD_VER_ENTRY(5, iwl_missed_beacons_notif)) CMD_VERSIONS(tx_resp_notif, - CMD_VER_ENTRY(8, iwl_tx_resp)) + CMD_VER_ENTRY(8, iwl_tx_resp) + CMD_VER_ENTRY(9, iwl_tx_resp)) CMD_VERSIONS(compressed_ba_notif, CMD_VER_ENTRY(5, iwl_compressed_ba_notif) - CMD_VER_ENTRY(6, iwl_compressed_ba_notif)) + CMD_VER_ENTRY(6, iwl_compressed_ba_notif) + CMD_VER_ENTRY(7, iwl_compressed_ba_notif)) CMD_VERSIONS(tlc_notif, - CMD_VER_ENTRY(3, iwl_tlc_update_notif)) + CMD_VER_ENTRY(3, iwl_tlc_update_notif) + CMD_VER_ENTRY(4, iwl_tlc_update_notif)) CMD_VERSIONS(mu_mimo_grp_notif, CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif)) CMD_VERSIONS(channel_switch_start_notif, @@ -335,7 +338,8 @@ CMD_VERSIONS(bt_coex_notif, CMD_VERSIONS(beacon_notification, CMD_VER_ENTRY(6, iwl_extended_beacon_notif)) CMD_VERSIONS(emlsr_mode_notif, - CMD_VER_ENTRY(1, iwl_esr_mode_notif)) + CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1) + CMD_VER_ENTRY(2, iwl_esr_mode_notif)) CMD_VERSIONS(emlsr_trans_fail_notif, CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif)) 
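Each CMD_VERSIONS() entry pairs an accepted notification version with the structure the handler expects, which is how the driver can take both the old and new tx_resp, compressed-BA, TLC and EMLSR-mode layouts and still size-check incoming payloads. A rough sketch of that idea, with placeholder sizes rather than the real firmware structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ver_entry {
        unsigned int version;
        size_t min_size;
};

static const struct ver_entry tx_resp_versions[] = {
        { .version = 8, .min_size = 24 },
        { .version = 9, .min_size = 24 },   /* same layout, new fields optional */
};

static bool notif_size_ok(const struct ver_entry *tbl, size_t n,
                          unsigned int fw_ver, size_t payload_len)
{
        for (size_t i = 0; i < n; i++)
                if (tbl[i].version == fw_ver)
                        return payload_len >= tbl[i].min_size;
        return false;                       /* unknown version: drop it */
}

int main(void)
{
        bool ok = notif_size_ok(tx_resp_versions, 2, 9, 32);

        printf("v9 payload of 32 bytes %s\n", ok ? "accepted" : "rejected");
        return 0;
}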
CMD_VERSIONS(uapsd_misbehaving_ap_notif, @@ -601,7 +605,7 @@ void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - if (unlikely(queue >= mld->trans->num_rx_queues)) + if (unlikely(queue >= mld->trans->info.num_rxqs)) return; if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c index 2fbc8090088b..d5a32ee56b92 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/phy.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c @@ -50,6 +50,9 @@ static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac, if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx) continue; + if (vif->type == NL80211_IFTYPE_AP && link_conf->ftm_responder) + data->use_def = true; + if (iwl_mld_chanctx_fils_enabled(vif, data->ctx)) data->use_def = true; } @@ -153,3 +156,43 @@ int iwl_mld_phy_fw_action(struct iwl_mld *mld, return ret; } + +static u32 iwl_mld_get_phy_config(struct iwl_mld *mld) +{ + u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | + FW_PHY_CFG_RX_CHAIN); + u32 valid_rx_ant = iwl_mld_get_valid_rx_ant(mld); + u32 valid_tx_ant = iwl_mld_get_valid_tx_ant(mld); + + phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | + valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; + + return mld->fw->phy_config & phy_config; +} + +int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld) +{ + const struct iwl_tlv_calib_ctrl *default_calib = + &mld->fw->default_calib[IWL_UCODE_REGULAR]; + struct iwl_phy_cfg_cmd_v3 cmd = { + .phy_cfg = cpu_to_le32(iwl_mld_get_phy_config(mld)), + .calib_control.event_trigger = default_calib->event_trigger, + .calib_control.flow_trigger = default_calib->flow_trigger, + .phy_specific_cfg = mld->fwrt.phy_filters, + }; + + IWL_INFO(mld, "Sending Phy CFG command: 0x%x\n", cmd.phy_cfg); + + return iwl_mld_send_cmd_pdu(mld, PHY_CONFIGURATION_CMD, &cmd); +} + +void iwl_mld_update_phy_chandef(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx); + struct cfg80211_chan_def *chandef = + iwl_mld_get_chandef_from_chanctx(mld, ctx); + + phy->chandef = *chandef; + iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h index 2212a89321b7..0deaf179f07c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h @@ -52,4 +52,9 @@ iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld, struct ieee80211_chanctx_conf *ctx); u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef); +int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld); + +void iwl_mld_update_phy_chandef(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx); + #endif /* __iwl_mld_phy_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c index 2f16c174b57e..8cc276041360 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c @@ -253,6 +253,9 @@ static void iwl_mld_power_build_cmd(struct iwl_mld *mld, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + if (iwl_fw_lookup_cmd_ver(mld->fw, MAC_PM_POWER_TABLE, 0) >= 2) + cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK); + /* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */ if 
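iwl_mld_get_phy_config() above starts from a mask with the TX/RX chain fields cleared, inserts only the currently valid antennas, and ANDs the result with the firmware's advertised phy_config. A small sketch of that bit manipulation; the field positions are assumed for the example and are not the real register layout.

#include <stdint.h>
#include <stdio.h>

#define PHY_CFG_TX_CHAIN_POS  16
#define PHY_CFG_TX_CHAIN      (0xfu << PHY_CFG_TX_CHAIN_POS)
#define PHY_CFG_RX_CHAIN_POS  20
#define PHY_CFG_RX_CHAIN      (0xfu << PHY_CFG_RX_CHAIN_POS)

static uint32_t build_phy_config(uint32_t fw_phy_config,
                                 uint32_t valid_tx_ant, uint32_t valid_rx_ant)
{
        /* keep every bit except the two chain fields... */
        uint32_t mask = ~(PHY_CFG_TX_CHAIN | PHY_CFG_RX_CHAIN);

        /* ...then allow only the chains that are actually valid */
        mask |= valid_tx_ant << PHY_CFG_TX_CHAIN_POS |
                valid_rx_ant << PHY_CFG_RX_CHAIN_POS;

        return fw_phy_config & mask;
}

int main(void)
{
        /* firmware advertises chains A+B (0x3); only antenna A (0x1) is valid */
        uint32_t cfg = build_phy_config(0x00330001u, 0x1, 0x1);

        printf("phy_config = 0x%08x\n", cfg);
        return 0;
}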
(link_conf->beacon_rate && (link_conf->beacon_rate->bitrate == 10 || diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c index d5c3f853d96c..5ee38fc168c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c @@ -21,7 +21,7 @@ static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2) { - *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr); + *gp2 = iwl_read_prph(mld->trans, mld->trans->mac_cfg->base->gp2_reg_addr); if (*gp2 == 0x5a5a5a5a) return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c index a75af8c1e8ab..326c300470ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c @@ -63,9 +63,9 @@ void iwl_mld_get_bios_tables(struct iwl_mld *mld) /* we don't fail if the table is not available */ } - ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt); - if (ret) - IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret); + iwl_uefi_get_uats_table(mld->trans, &mld->fwrt); + + iwl_bios_get_phy_filters(&mld->fwrt); } static int iwl_mld_geo_sar_init(struct iwl_mld *mld) diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c index b87faca23ceb..e85f45bce79a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c @@ -31,13 +31,54 @@ iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif) *result = ret; } +struct iwl_mld_roc_iter_data { + enum iwl_roc_activity activity; + struct ieee80211_vif *vif; + bool found; +}; + +static void iwl_mld_find_roc_vif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_roc_iter_data *roc_data = data; + + if (mld_vif->roc_activity != roc_data->activity) + return; + + /* The FW supports one ROC of each type simultaneously */ + if (WARN_ON(roc_data->found)) { + roc_data->vif = NULL; + return; + } + + roc_data->found = true; + roc_data->vif = vif; +} + +static struct ieee80211_vif * +iwl_mld_find_roc_vif(struct iwl_mld *mld, enum iwl_roc_activity activity) +{ + struct iwl_mld_roc_iter_data roc_data = { + .activity = activity, + .found = false, + }; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_find_roc_vif_iter, + &roc_data); + + return roc_data.vif; +} + int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) { struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); - struct iwl_mld_int_sta *aux_sta; + struct iwl_mld_int_sta *aux_sta = &mld_vif->aux_sta; struct iwl_roc_req cmd = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), }; @@ -49,38 +90,40 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, lockdep_assert_wiphy(mld->wiphy); - ieee80211_iterate_active_interfaces_mtx(mld->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mld_vif_iter_emlsr_block_roc, - &ret); - if (ret) - return ret; - - /* TODO: task=Hotspot 2.0 */ - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && + vif->type != NL80211_IFTYPE_STATION) { IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n", vif->type); return -EOPNOTSUPP; } - switch (type) { - case 
IEEE80211_ROC_TYPE_NORMAL: - activity = ROC_ACTIVITY_P2P_DISC; - break; - case IEEE80211_ROC_TYPE_MGMT_TX: - activity = ROC_ACTIVITY_P2P_NEG; - break; - default: - WARN_ONCE(1, "Got an invalid P2P ROC type\n"); - return -EINVAL; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + switch (type) { + case IEEE80211_ROC_TYPE_NORMAL: + activity = ROC_ACTIVITY_P2P_DISC; + break; + case IEEE80211_ROC_TYPE_MGMT_TX: + activity = ROC_ACTIVITY_P2P_NEG; + break; + default: + WARN_ONCE(1, "Got an invalid P2P ROC type\n"); + return -EINVAL; + } + } else { + activity = ROC_ACTIVITY_HOTSPOT; } - if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES)) + /* The FW supports one ROC of each type simultaneously */ + if (WARN_ON(iwl_mld_find_roc_vif(mld, activity))) return -EBUSY; - /* No MLO on P2P device */ - aux_sta = &mld_vif->deflink.aux_sta; + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_emlsr_block_roc, + &ret); + if (ret) + return ret; ret = iwl_mld_add_aux_sta(mld, aux_sta); if (ret) @@ -91,9 +134,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, cmd.channel_info.channel = cpu_to_le32(channel->hw_value); cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band); cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20; - /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC - * on the BSS vif - */ cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY); cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); @@ -105,6 +145,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, IWL_ERR(mld, "Couldn't send the ROC_CMD\n"); return ret; } + mld_vif->roc_activity = activity; return 0; @@ -136,9 +177,9 @@ static void iwl_mld_destroy_roc(struct iwl_mld *mld, * we can flush the Tx on the queues */ - iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id); + iwl_mld_flush_link_sta_txqs(mld, mld_vif->aux_sta.sta_id); - iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf); + iwl_mld_remove_aux_sta(mld, vif); } int iwl_mld_cancel_roc(struct ieee80211_hw *hw, @@ -156,8 +197,8 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw, lockdep_assert_wiphy(mld->wiphy); - /* TODO: task=Hotspot 2.0 */ - if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE)) + if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE && + vif->type != NL80211_IFTYPE_STATION)) return -EOPNOTSUPP; /* No roc activity running it's probably already done */ @@ -192,10 +233,10 @@ void iwl_mld_handle_roc_notif(struct iwl_mld *mld, { const struct iwl_roc_notif *notif = (void *)pkt->data; u32 activity = le32_to_cpu(notif->activity); - /* TODO: task=Hotspot 2.0 - roc can run on BSS */ - struct ieee80211_vif *vif = mld->p2p_device_vif; struct iwl_mld_vif *mld_vif; + struct ieee80211_vif *vif; + vif = iwl_mld_find_roc_vif(mld, activity); if (WARN_ON(!vif)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c index 3e69f2a4fa81..ce0093d5c638 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c @@ -36,11 +36,13 @@ struct iwl_mld_rx_phy_data { }; static void -iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc, +iwl_mld_fill_phy_data(struct iwl_mld *mld, + struct iwl_rx_mpdu_desc *desc, struct iwl_mld_rx_phy_data *phy_data) { phy_data->phy_info = le16_to_cpu(desc->phy_info); - phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + phy_data->rate_n_flags = iwl_v3_rate_from_v2_v3(desc->v3.rate_n_flags, + mld->fw_rates_ver_3); 
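The remain-on-channel path above now also serves station interfaces: P2P device vifs map the ROC type to the P2P discovery or negotiation activity, station vifs use the hotspot activity, and iwl_mld_find_roc_vif() enforces that at most one ROC per activity runs at a time. A condensed sketch of that selection and uniqueness check, with simplified enums and an in-memory vif list:

#include <stdio.h>

enum roc_activity { ROC_P2P_DISC, ROC_P2P_NEG, ROC_HOTSPOT, ROC_NONE };
enum vif_type     { VIF_P2P_DEVICE, VIF_STATION, VIF_OTHER };
enum roc_type     { ROC_TYPE_NORMAL, ROC_TYPE_MGMT_TX };

struct vif {
        enum vif_type type;
        enum roc_activity roc_activity;   /* ROC_NONE when idle */
};

static int pick_activity(enum vif_type type, enum roc_type roc)
{
        if (type == VIF_P2P_DEVICE)
                return roc == ROC_TYPE_MGMT_TX ? ROC_P2P_NEG : ROC_P2P_DISC;
        if (type == VIF_STATION)
                return ROC_HOTSPOT;
        return -1;                        /* unsupported interface type */
}

/* the firmware runs at most one ROC of each activity simultaneously */
static int activity_busy(const struct vif *vifs, int n, enum roc_activity act)
{
        for (int i = 0; i < n; i++)
                if (vifs[i].roc_activity == act)
                        return 1;
        return 0;
}

int main(void)
{
        struct vif vifs[2] = {
                { .type = VIF_STATION,    .roc_activity = ROC_HOTSPOT },
                { .type = VIF_P2P_DEVICE, .roc_activity = ROC_NONE },
        };
        int act = pick_activity(VIF_STATION, ROC_TYPE_NORMAL);

        if (act < 0 || activity_busy(vifs, 2, act))
                printf("ROC request rejected\n");
        else
                printf("starting ROC activity %d\n", act);
        return 0;
}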
phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); phy_data->energy_a = desc->v3.energy_a; phy_data->energy_b = desc->v3.energy_b; @@ -1176,7 +1178,7 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb, IWL_RX_PHY_DATA1_INFO_TYPE_MASK); /* set the preamble flag if appropriate */ - if (format == RATE_MCS_CCK_MSK && + if (format == RATE_MCS_MOD_TYPE_CCK && phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; @@ -1201,7 +1203,7 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb, } /* must be before L-SIG data */ - if (format == RATE_MCS_HE_MSK) + if (format == RATE_MCS_MOD_TYPE_HE) iwl_mld_rx_he(mld, skb, phy_data, queue); iwl_mld_decode_lsig(skb, phy_data); @@ -1209,40 +1211,53 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb, rx_status->device_timestamp = phy_data->gp2_on_air_rise; /* using TLV format and must be after all fixed len fields */ - if (format == RATE_MCS_EHT_MSK) + if (format == RATE_MCS_MOD_TYPE_EHT) iwl_mld_rx_eht(mld, skb, phy_data, queue); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (unlikely(mld->monitor.on)) + if (unlikely(mld->monitor.on)) { iwl_mld_add_rtap_sniffer_config(mld, skb); + + if (mld->monitor.ptp_time) { + u64 adj_time = + iwl_mld_ptp_get_adj_time(mld, + phy_data->gp2_on_air_rise * + NSEC_PER_USEC); + + rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC); + rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64; + rx_status->flag &= ~RX_FLAG_MACTIME; + } + } #endif - if (format != RATE_MCS_CCK_MSK && is_sgi) + if (format != RATE_MCS_MOD_TYPE_CCK && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; switch (format) { - case RATE_MCS_HT_MSK: + case RATE_MCS_MOD_TYPE_HT: rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; break; - case RATE_MCS_VHT_MSK: - case RATE_MCS_HE_MSK: - case RATE_MCS_EHT_MSK: - if (format == RATE_MCS_VHT_MSK) { + case RATE_MCS_MOD_TYPE_VHT: + case RATE_MCS_MOD_TYPE_HE: + case RATE_MCS_MOD_TYPE_EHT: + if (format == RATE_MCS_MOD_TYPE_VHT) { rx_status->encoding = RX_ENC_VHT; - } else if (format == RATE_MCS_HE_MSK) { + } else if (format == RATE_MCS_MOD_TYPE_HE) { rx_status->encoding = RX_ENC_HE; rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); - } else if (format == RATE_MCS_EHT_MSK) { + } else if (format == RATE_MCS_MOD_TYPE_EHT) { rx_status->encoding = RX_ENC_EHT; } - rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + rx_status->nss = u32_get_bits(rate_n_flags, + RATE_MCS_NSS_MSK) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; break; @@ -1748,7 +1763,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, hdr = (void *)(pkt->data + mpdu_desc_size); - iwl_mld_fill_phy_data(mpdu_desc, &phy_data); + iwl_mld_fill_phy_data(mld, mpdu_desc, &phy_data); if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { /* If the device inserted padding it means that (it thought) @@ -1850,7 +1865,7 @@ void iwl_mld_sync_rx_queues(struct iwl_mld *mld, enum iwl_mld_internal_rxq_notif_type type, const void *notif_payload, u32 notif_payload_size) { - u8 num_rx_queues = mld->trans->num_rx_queues; + u8 num_rx_queues = mld->trans->info.num_rxqs; struct { struct iwl_rxq_sync_cmd sync_cmd; struct iwl_mld_internal_rxq_notif notif; @@ -1970,7 
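When monitor.ptp_time is set, the sniffer timestamps frames with the device's PTP clock: the GP2 on-air-rise value (microseconds) is scaled to nanoseconds, passed through the GP2-to-PTP adjustment, and divided back down for the 64-bit radiotap mactime. A sketch of just that arithmetic, with a fixed offset standing in for iwl_mld_ptp_get_adj_time():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* placeholder for the GP2-to-PTP correlation in the driver */
static uint64_t ptp_adjust_ns(uint64_t gp2_ns, uint64_t offset_ns)
{
        return gp2_ns + offset_ns;
}

int main(void)
{
        uint32_t gp2_on_air_rise = 123456;                /* device time, us */
        uint64_t offset_ns = 42000000000ULL;              /* assumed offset */
        uint64_t adj_ns = ptp_adjust_ns((uint64_t)gp2_on_air_rise * NSEC_PER_USEC,
                                        offset_ns);

        printf("radiotap TS64 mactime: %" PRIu64 " us\n",
               adj_ns / NSEC_PER_USEC);
        return 0;
}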
+1985,8 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, phy_data.data1 = desc->phy_info[1]; phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); - phy_data.rate_n_flags = le32_to_cpu(desc->rate); + phy_data.rate_n_flags = iwl_v3_rate_from_v2_v3(desc->rate, + mld->fw_rates_ver_3); phy_data.with_data = false; BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec)); @@ -2032,17 +2048,17 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, * may be up to 8 spatial streams. */ switch (format) { - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; break; - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_EHT: rx_status->nss = le32_get_bits(desc->rx_vec[2], RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c index 7ec04318ec2f..3fce7cd2d512 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c @@ -1920,6 +1920,9 @@ void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld, IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n"); ieee80211_scan_completed(mld->hw, &info); + + /* Scan is over, we can check again the tpt counters */ + iwl_mld_stop_ignoring_tpt_updates(mld); } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) { ieee80211_sched_scan_stopped(mld->hw); mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c index 332a7aecec2d..8fb51209b4a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c @@ -401,9 +401,11 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta) static int iwl_mld_send_sta_cmd(struct iwl_mld *mld, const struct iwl_sta_cfg_cmd *cmd) { - int ret = iwl_mld_send_cmd_pdu(mld, - WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), - cmd); + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD); + int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ? + sizeof(*cmd) : + sizeof(struct iwl_sta_cfg_cmd_v1); + int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len); if (ret) IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); return ret; @@ -660,7 +662,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta) if (mld->fw_status.in_hw_restart) return 0; - dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data), + dup_data = kcalloc(mld->trans->info.num_rxqs, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; @@ -673,7 +675,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta) * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. 
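iwl_mld_send_sta_cmd() above keeps filling the newest STA_CONFIG_CMD layout but sends only sizeof(struct iwl_sta_cfg_cmd_v1) bytes to firmware that reports command version 1; this works because the newer structure extends the older one, so the v1 prefix is still valid. A trivial sketch with placeholder struct contents:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sta_cfg_v1 { uint32_t sta_id; uint32_t link_id; };
struct sta_cfg    { uint32_t sta_id; uint32_t link_id; uint32_t new_field; };

static size_t sta_cfg_len(unsigned int fw_cmd_ver)
{
        return fw_cmd_ver > 1 ? sizeof(struct sta_cfg)
                              : sizeof(struct sta_cfg_v1);
}

int main(void)
{
        /* an old firmware only gets the v1 prefix of the command */
        printf("v1 firmware: %zu bytes, v2 firmware: %zu bytes\n",
               sta_cfg_len(1), sta_cfg_len(2));
        return 0;
}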
*/ - for (int q = 0; q < mld->trans->num_rx_queues; q++) + for (int q = 0; q < mld->trans->info.num_rxqs; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mld_sta->dup_data = dup_data; @@ -695,13 +697,13 @@ static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld, sta->tdls || !ieee80211_vif_is_mld(vif)) return; - mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues, + mld_sta->mpdu_counters = kcalloc(mld->trans->info.num_rxqs, sizeof(*mld_sta->mpdu_counters), GFP_KERNEL); if (!mld_sta->mpdu_counters) return; - for (int q = 0; q < mld->trans->num_rx_queues; q++) + for (int q = 0; q < mld->trans->info.num_rxqs; q++) spin_lock_init(&mld_sta->mpdu_counters[q].lock); } @@ -947,7 +949,7 @@ static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld, int queue, size; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, - mld->trans->cfg->min_txq_size); + mld->trans->mac_cfg->base->min_txq_size); queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size, IWL_WATCHDOG_DISABLED); @@ -1087,6 +1089,24 @@ int iwl_mld_add_aux_sta(struct iwl_mld *mld, 0, NULL, IWL_MAX_TID_COUNT); } +int iwl_mld_add_mon_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + + if (WARN_ON(!mld_link)) + return -EINVAL; + + if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR)) + return -EINVAL; + + return iwl_mld_add_internal_sta(mld, &mld_link->mon_sta, + STATION_TYPE_BCAST_MGMT, + mld_link->fw_id, NULL, + IWL_MAX_TID_COUNT); +} + static void iwl_mld_remove_internal_sta(struct iwl_mld *mld, struct iwl_mld_int_sta *internal_sta, bool flush, u8 tid) @@ -1140,6 +1160,19 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld, } void iwl_mld_remove_aux_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE && + vif->type != NL80211_IFTYPE_STATION)) + return; + + iwl_mld_remove_internal_sta(mld, &mld_vif->aux_sta, false, + IWL_MAX_TID_COUNT); +} + +void iwl_mld_remove_mon_sta(struct iwl_mld *mld, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link) { @@ -1148,11 +1181,10 @@ void iwl_mld_remove_aux_sta(struct iwl_mld *mld, if (WARN_ON(!mld_link)) return; - /* TODO: Hotspot 2.0 */ - if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE)) + if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR)) return; - iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false, + iwl_mld_remove_internal_sta(mld, &mld_link->mon_sta, false, IWL_MAX_TID_COUNT); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h index ddcffd7b9fde..1897b121aae2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h @@ -247,6 +247,10 @@ int iwl_mld_add_mcast_sta(struct iwl_mld *mld, int iwl_mld_add_aux_sta(struct iwl_mld *mld, struct iwl_mld_int_sta *internal_sta); +int iwl_mld_add_mon_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + void iwl_mld_remove_bcast_sta(struct iwl_mld *mld, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link); @@ -256,6 +260,9 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld, struct ieee80211_bss_conf *link); void iwl_mld_remove_aux_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif); + +void iwl_mld_remove_mon_sta(struct iwl_mld *mld, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link); diff --git 
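The duplicate-detection state above is allocated per RX queue (trans->info.num_rxqs) and every last_seq entry is seeded with 0xff bytes, so the very first frame on a TID is accepted even if it carries sequence number 0 with the retry bit set. A user-space sketch of that initialization and check, with the TID count reduced for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_TIDS 8                    /* reduced for the example */

struct dup_data {
        uint16_t last_seq[NUM_TIDS];
};

static struct dup_data *alloc_dup_data(unsigned int num_rxqs)
{
        struct dup_data *d = calloc(num_rxqs, sizeof(*d));

        if (!d)
                return NULL;
        /* 0xffff can never match a real 12-bit sequence number */
        for (unsigned int q = 0; q < num_rxqs; q++)
                memset(d[q].last_seq, 0xff, sizeof(d[q].last_seq));
        return d;
}

static bool is_dup(struct dup_data *d, unsigned int q, unsigned int tid,
                   uint16_t seq, bool retry)
{
        bool dup = retry && d[q].last_seq[tid] == seq;

        d[q].last_seq[tid] = seq;
        return dup;
}

int main(void)
{
        struct dup_data *d = alloc_dup_data(4);

        /* first frame: seq 0 with retry set must still be accepted */
        printf("dropped: %d\n", d ? is_dup(d, 0, 0, 0, true) : -1);
        free(d);
        return 0;
}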
a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c index 360a6bfbbfb2..f633cb1cf510 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c @@ -191,11 +191,12 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta, break; } - if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { + if (format == RATE_MCS_MOD_TYPE_CCK || + format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) { int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); /* add the offset needed to get to the legacy ofdm indices */ - if (format == RATE_MCS_LEGACY_OFDM_MSK) + if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) rate += IWL_FIRST_OFDM_RATE; switch (rate) { @@ -240,7 +241,7 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta, rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; - if (format == RATE_MCS_HT_MSK) + if (format == RATE_MCS_MOD_TYPE_HT) rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags); else rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); @@ -249,10 +250,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta, rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; switch (format) { - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_EHT: rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; @@ -293,10 +294,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta, if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; break; - case RATE_MCS_HT_MSK: + case RATE_MCS_MOD_TYPE_HT: rinfo->flags |= RATE_INFO_FLAGS_MCS; break; - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; break; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile index 36317feb923b..3e2ae6020613 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o +iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o emlsr_with_bt.o ccflags-y += -I$(src)/../ obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c new file mode 100644 index 000000000000..91556ee5c142 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for link selection functions + * + * Copyright (C) 2025 Intel Corporation + */ +#include <kunit/static_stub.h> + +#include "utils.h" +#include "mld.h" +#include "mlo.h" + +static const struct emlsr_with_bt_test_case { + const char *desc; + struct { + struct iwl_bt_coex_profile_notif notif; + s32 signal; + bool check_entry; + } input; + bool emlsr_allowed; +} emlsr_with_bt_cases[] = { + { + .desc = "BT penalty(exit) with low rssi 4.5: emlsr allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {4, 5}, + .notif.wifi_loss_mid_high_rssi[1] = {7, 9}, + .signal = -69, + .check_entry = false, + }, + .emlsr_allowed = true, + }, + { + .desc = "BT penalty(exit) from high rssi 5: emlsr allowed", + .input = { + 
.notif.wifi_loss_low_rssi[1] = {7, 9}, + .notif.wifi_loss_mid_high_rssi[1] = {5, 5}, + .signal = -68, + .check_entry = false, + }, + .emlsr_allowed = true, + }, + { + .desc = "BT penalty(exit) with low rssi 8: emlsr not allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {7, 9}, + .notif.wifi_loss_mid_high_rssi[1] = {4, 5}, + .signal = -69, + .check_entry = false, + }, + .emlsr_allowed = false, + }, + { + .desc = "BT penalty(exit) from high rssi 9: emlsr not allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {4, 5}, + .notif.wifi_loss_mid_high_rssi[1] = {9, 9}, + .signal = -68, + .check_entry = false, + }, + .emlsr_allowed = false, + }, + { + .desc = "BT penalty(entry) with low rssi 4.5: emlsr allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {4, 5}, + .notif.wifi_loss_mid_high_rssi[1] = {7, 9}, + .signal = -63, + .check_entry = true, + }, + .emlsr_allowed = true, + }, + { + .desc = "BT penalty(entry) from high rssi 5: emlsr allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {7, 9}, + .notif.wifi_loss_mid_high_rssi[1] = {5, 5}, + .signal = -62, + .check_entry = false, + }, + .emlsr_allowed = true, + }, + { + .desc = "BT penalty(entry) with low rssi 8: emlsr not allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {7, 9}, + .notif.wifi_loss_mid_high_rssi[1] = {4, 5}, + .signal = -63, + .check_entry = false, + }, + .emlsr_allowed = true, + }, + { + .desc = "BT penalty(entry) from high rssi 9: emlsr not allowed", + .input = { + .notif.wifi_loss_low_rssi[1] = {4, 5}, + .notif.wifi_loss_mid_high_rssi[1] = {9, 9}, + .signal = -62, + .check_entry = true, + }, + .emlsr_allowed = false, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(emlsr_with_bt, emlsr_with_bt_cases, desc); + +static void test_emlsr_with_bt(struct kunit *test) +{ + struct iwl_mld *mld = test->priv; + const struct emlsr_with_bt_test_case *test_param = + (const void *)(test->param_value); + struct ieee80211_vif *vif = + iwlmld_kunit_add_vif(true, NL80211_IFTYPE_STATION); + struct ieee80211_bss_conf *link = iwlmld_kunit_add_link(vif, 1); + bool actual_value = false; + + KUNIT_ALLOC_AND_ASSERT(test, link->bss); + + /* Extract test case parameters */ + link->bss->signal = DBM_TO_MBM(test_param->input.signal); + memcpy(&mld->last_bt_notif, &test_param->input.notif, + sizeof(struct iwl_bt_coex_profile_notif)); + + actual_value = iwl_mld_bt_allows_emlsr(mld, link, + test_param->input.check_entry); + /* Assert that the returned value matches the expected emlsr_allowed */ + KUNIT_EXPECT_EQ(test, actual_value, test_param->emlsr_allowed); +} + +static struct kunit_case emlsr_with_bt_test_cases[] = { + KUNIT_CASE_PARAM(test_emlsr_with_bt, emlsr_with_bt_gen_params), + {}, +}; + +static struct kunit_suite emlsr_with_bt = { + .name = "iwlmld-emlsr-with-bt-tests", + .test_cases = emlsr_with_bt_test_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(emlsr_with_bt); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c index 4e189bf8b3fb..0e3b9417dd63 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c @@ -2,7 +2,7 @@ /* * KUnit tests for channel helper functions * - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation */ #include <kunit/test.h> @@ -30,10 +30,10 @@ static void test_hcmd_names_sorted(struct kunit *test) static void test_hcmd_names_for_rx(struct kunit *test) { static struct iwl_trans t = { - .command_groups = iwl_mld_groups, + 
.conf.command_groups = iwl_mld_groups, }; - t.command_groups_size = global_iwl_mld_goups_size; + t.conf.command_groups_size = global_iwl_mld_goups_size; for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) { const struct iwl_rx_handler *rxh; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c index 295dcfd3f85d..94a037bec1fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c @@ -32,7 +32,7 @@ static const struct link_grading_test_case { .desc = "channel util of 128 (50%)", .input.link = { .link_id = 0, - .chandef = &chandef_2ghz, + .chandef = &chandef_2ghz_20mhz, .active = false, .has_chan_util_elem = true, .chan_util = 128, @@ -43,7 +43,7 @@ static const struct link_grading_test_case { .desc = "channel util of 180 (70%)", .input.link = { .link_id = 0, - .chandef = &chandef_2ghz, + .chandef = &chandef_2ghz_20mhz, .active = false, .has_chan_util_elem = true, .chan_util = 180, @@ -54,7 +54,7 @@ static const struct link_grading_test_case { .desc = "channel util of 180 (70%), channel load by us of 10%", .input.link = { .link_id = 0, - .chandef = &chandef_2ghz, + .chandef = &chandef_2ghz_20mhz, .has_chan_util_elem = true, .chan_util = 180, .active = true, @@ -66,7 +66,7 @@ static const struct link_grading_test_case { .desc = "no channel util element", .input.link = { .link_id = 0, - .chandef = &chandef_2ghz, + .chandef = &chandef_2ghz_20mhz, .active = true, }, .expected_grade = 120, @@ -132,7 +132,7 @@ static void test_link_grading(struct kunit *test) bool active = test_param->input.link.active; u16 valid_links; struct iwl_mld_kunit_link assoc_link = { - .band = test_param->input.link.chandef->chan->band, + .chandef = test_param->input.link.chandef, }; /* If the link is not active, use a different link as the assoc link */ @@ -172,108 +172,150 @@ static struct kunit_suite link_selection = { kunit_test_suite(link_selection); -static const struct channel_load_case { +static const struct link_pair_case { const char *desc; + const struct cfg80211_chan_def *chandef_a, *chandef_b; bool low_latency_vif; u32 chan_load_not_by_us; - enum nl80211_chan_width bw_a; - enum nl80211_chan_width bw_b; bool primary_link_active; - bool expected_result; -} channel_load_cases[] = { + u32 expected_result; +} link_pair_cases[] = { { .desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed", .low_latency_vif = false, .primary_link_active = false, - .bw_a = NL80211_CHAN_WIDTH_40, - .bw_b = NL80211_CHAN_WIDTH_20, - .expected_result = false, + .chandef_a = &chandef_5ghz_40mhz, + .chandef_b = &chandef_6ghz_20mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD, }, { .desc = "Equal bandwidths, sufficient channel load, EMLSR allowed", .low_latency_vif = false, .primary_link_active = true, .chan_load_not_by_us = 11, - .bw_a = NL80211_CHAN_WIDTH_40, - .bw_b = NL80211_CHAN_WIDTH_40, - .expected_result = true, + .chandef_a = &chandef_5ghz_40mhz, + .chandef_b = &chandef_6ghz_40mhz, + .expected_result = 0, }, { .desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed", .low_latency_vif = false, .primary_link_active = true, .chan_load_not_by_us = 6, - .bw_a = NL80211_CHAN_WIDTH_80, - .bw_b = NL80211_CHAN_WIDTH_80, - .expected_result = false, + .chandef_a = &chandef_5ghz_80mhz, + .chandef_b = &chandef_6ghz_80mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD, }, { .desc = "Low latency VIF, sufficient channel load, 
EMLSR allowed", .low_latency_vif = true, .primary_link_active = true, .chan_load_not_by_us = 6, - .bw_a = NL80211_CHAN_WIDTH_160, - .bw_b = NL80211_CHAN_WIDTH_160, - .expected_result = true, + .chandef_a = &chandef_5ghz_160mhz, + .chandef_b = &chandef_6ghz_160mhz, + .expected_result = 0, }, { .desc = "Different bandwidths (2x ratio), primary link load permits EMLSR", .low_latency_vif = false, .primary_link_active = true, .chan_load_not_by_us = 30, - .bw_a = NL80211_CHAN_WIDTH_40, - .bw_b = NL80211_CHAN_WIDTH_20, - .expected_result = true, + .chandef_a = &chandef_5ghz_40mhz, + .chandef_b = &chandef_6ghz_20mhz, + .expected_result = 0, }, { .desc = "Different bandwidths (4x ratio), primary link load permits EMLSR", .low_latency_vif = false, .primary_link_active = true, .chan_load_not_by_us = 45, - .bw_a = NL80211_CHAN_WIDTH_80, - .bw_b = NL80211_CHAN_WIDTH_20, - .expected_result = true, + .chandef_a = &chandef_5ghz_80mhz, + .chandef_b = &chandef_6ghz_20mhz, + .expected_result = 0, }, { .desc = "Different bandwidths (16x ratio), primary link load insufficient", .low_latency_vif = false, .primary_link_active = true, .chan_load_not_by_us = 45, - .bw_a = NL80211_CHAN_WIDTH_320, - .bw_b = NL80211_CHAN_WIDTH_20, - .expected_result = false, + .chandef_a = &chandef_6ghz_320mhz, + .chandef_b = &chandef_5ghz_20mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD, + }, + { + .desc = "Same band not allowed (2.4 GHz)", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 30, + .chandef_a = &chandef_2ghz_20mhz, + .chandef_b = &chandef_2ghz_11_20mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND, + }, + { + .desc = "Same band not allowed (5 GHz)", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 30, + .chandef_a = &chandef_5ghz_40mhz, + .chandef_b = &chandef_5ghz_40mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND, + }, + { + .desc = "Same band allowed (5 GHz separated)", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 30, + .chandef_a = &chandef_5ghz_40mhz, + .chandef_b = &chandef_5ghz_120_40mhz, + .expected_result = 0, + }, + { + .desc = "Same band not allowed (6 GHz)", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 30, + .chandef_a = &chandef_6ghz_160mhz, + .chandef_b = &chandef_6ghz_221_160mhz, + .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND, }, }; -KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc); +KUNIT_ARRAY_PARAM_DESC(link_pair, link_pair_cases, desc); -static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test) +static void test_iwl_mld_link_pair_allows_emlsr(struct kunit *test) { - const struct channel_load_case *params = test->param_value; + const struct link_pair_case *params = test->param_value; struct iwl_mld *mld = test->priv; struct ieee80211_vif *vif; - struct cfg80211_chan_def chandef_a, chandef_b; - struct iwl_mld_link_sel_data a = {.chandef = &chandef_a, - .link_id = 4}; - struct iwl_mld_link_sel_data b = {.chandef = &chandef_b, - .link_id = 5}; + struct ieee80211_bss_conf *link; + /* link A is the primary and link B is the secondary */ + struct iwl_mld_link_sel_data a = { + .chandef = params->chandef_a, + .link_id = 4, + }; + struct iwl_mld_link_sel_data b = { + .chandef = params->chandef_b, + .link_id = 5, + }; struct iwl_mld_kunit_link assoc_link = { + .chandef = params->primary_link_active ? a.chandef : b.chandef, .id = params->primary_link_active ? 
a.link_id : b.link_id, - .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b, }; - bool result; + u32 result; vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id), &assoc_link); - chandef_a.width = params->bw_a; - chandef_b.width = params->bw_b; - if (params->low_latency_vif) iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1; wiphy_lock(mld->wiphy); + link = wiphy_dereference(mld->wiphy, vif->link_conf[a.link_id]); + KUNIT_ALLOC_AND_ASSERT(test, link->bss); + link = wiphy_dereference(mld->wiphy, vif->link_conf[b.link_id]); + KUNIT_ALLOC_AND_ASSERT(test, link->bss); + /* Simulate channel load */ if (params->primary_link_active) { struct iwl_mld_phy *phy = @@ -282,22 +324,22 @@ static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test) phy->avg_channel_load_not_by_us = params->chan_load_not_by_us; } - result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b); + result = iwl_mld_emlsr_pair_state(vif, &a, &b); wiphy_unlock(mld->wiphy); KUNIT_EXPECT_EQ(test, result, params->expected_result); } -static struct kunit_case channel_load_criteria_test_cases[] = { - KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params), +static struct kunit_case link_pair_criteria_test_cases[] = { + KUNIT_CASE_PARAM(test_iwl_mld_link_pair_allows_emlsr, link_pair_gen_params), {} }; -static struct kunit_suite channel_load_criteria_tests = { - .name = "iwlmld_channel_load_allows_emlsr", - .test_cases = channel_load_criteria_test_cases, +static struct kunit_suite link_pair_criteria_tests = { + .name = "iwlmld_link_pair_allows_emlsr", + .test_cases = link_pair_criteria_test_cases, .init = iwlmld_kunit_test_init, }; -kunit_test_suite(channel_load_criteria_tests); +kunit_test_suite(link_pair_criteria_tests); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c index 4a4eaa134bd3..69a0d67858bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c @@ -63,11 +63,11 @@ static void test_missed_beacon(struct kunit *test) struct iwl_rx_packet *pkt; struct iwl_mld_kunit_link link1 = { .id = 0, - .band = NL80211_BAND_6GHZ, + .chandef = &chandef_6ghz_160mhz, }; struct iwl_mld_kunit_link link2 = { .id = 1, - .band = NL80211_BAND_5GHZ, + .chandef = &chandef_5ghz_80mhz, }; kunit_activate_static_stub(test, ieee80211_connection_loss, diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c index 9712ee696509..26cf27be762d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c @@ -24,7 +24,7 @@ int iwlmld_kunit_test_init(struct kunit *test) { struct iwl_mld *mld; struct iwl_trans *trans; - const struct iwl_cfg *cfg; + const struct iwl_rf_cfg *cfg; struct iwl_fw *fw; struct ieee80211_hw *hw; @@ -146,7 +146,7 @@ iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id) } struct ieee80211_chanctx_conf * -iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def) +iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def) { struct kunit *test = kunit_get_current_test(); struct iwl_mld *mld = test->priv; @@ -346,8 +346,7 @@ iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link) else link = &vif->bss_conf; - chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band, - assoc_link->bandwidth); + chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->chandef); wiphy_lock(mld->wiphy); 
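The new emlsr_with_bt and link-pair suites above share the same table-driven shape: an array of cases carrying a description, the inputs and the expected result, plus one parameterized runner. A plain-C sketch of that pattern, using a trivial stand-in for the function under test:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EXIT_EQUAL_BAND 0x4

static uint32_t pair_state(int band_a, int band_b)
{
        return band_a == band_b ? EXIT_EQUAL_BAND : 0;
}

static const struct pair_case {
        const char *desc;
        int band_a, band_b;
        uint32_t expected;
} pair_cases[] = {
        { "different bands allowed",  1, 2, 0 },
        { "same band blocked",        1, 1, EXIT_EQUAL_BAND },
};

int main(void)
{
        int failures = 0;

        for (size_t i = 0; i < sizeof(pair_cases) / sizeof(pair_cases[0]); i++) {
                const struct pair_case *c = &pair_cases[i];
                uint32_t got = pair_state(c->band_a, c->band_b);

                printf("%-28s %s\n", c->desc,
                       got == c->expected ? "ok" : "FAIL");
                failures += got != c->expected;
        }
        return failures;
}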
iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx); @@ -428,7 +427,7 @@ struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1, link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]); KUNIT_EXPECT_NOT_NULL(test, link); - chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth); + chan_ctx = iwlmld_kunit_add_chanctx(link2->chandef); iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx); wiphy_unlock(mld->wiphy); @@ -472,3 +471,33 @@ struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif, return iwl_mld_phy_from_mac80211(chanctx); } + +static const struct chandef_case { + const char *desc; + const struct cfg80211_chan_def *chandef; +} chandef_cases[] = { +#define CHANDEF(c, ...) { .desc = "chandef " #c " valid", .chandef = &c, }, + CHANDEF_LIST +#undef CHANDEF +}; + +KUNIT_ARRAY_PARAM_DESC(chandef, chandef_cases, desc); + +static void test_iwl_mld_chandef_valid(struct kunit *test) +{ + const struct chandef_case *params = test->param_value; + + KUNIT_EXPECT_EQ(test, true, cfg80211_chandef_valid(params->chandef)); +} + +static struct kunit_case chandef_test_cases[] = { + KUNIT_CASE_PARAM(test_iwl_mld_chandef_valid, chandef_gen_params), + {} +}; + +static struct kunit_suite chandef_tests = { + .name = "iwlmld_valid_test_chandefs", + .test_cases = chandef_test_cases, +}; + +kunit_test_suite(chandef_tests); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h index d3723653cf1b..edf8eef4e81a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h @@ -14,9 +14,8 @@ struct iwl_mld; int iwlmld_kunit_test_init(struct kunit *test); struct iwl_mld_kunit_link { + const struct cfg80211_chan_def *chandef; u8 id; - enum nl80211_band band; - enum nl80211_chan_width bandwidth; }; enum nl80211_iftype; @@ -42,50 +41,57 @@ static struct ieee80211_channel _name = { \ .hw_value = (_freq), \ } -#define CHANDEF(_name, _channel, _freq1, _width) \ -__maybe_unused static struct cfg80211_chan_def _name = { \ - .chan = &(_channel), \ - .center_freq1 = (_freq1), \ - .width = (_width), \ -} - CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412); +CHANNEL(chan_2ghz_11, NL80211_BAND_2GHZ, 2462); CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200); +CHANNEL(chan_5ghz_120, NL80211_BAND_5GHZ, 5600); CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115); +CHANNEL(chan_6ghz_221, NL80211_BAND_6GHZ, 7055); /* Feel free to add more */ +#undef CHANNEL + +#define CHANDEF_LIST \ + CHANDEF(chandef_2ghz_20mhz, chan_2ghz, 2412, \ + NL80211_CHAN_WIDTH_20) \ + CHANDEF(chandef_2ghz_40mhz, chan_2ghz, 2422, \ + NL80211_CHAN_WIDTH_40) \ + CHANDEF(chandef_2ghz_11_20mhz, chan_2ghz_11, 2462, \ + NL80211_CHAN_WIDTH_20) \ + CHANDEF(chandef_5ghz_20mhz, chan_5ghz, 5200, \ + NL80211_CHAN_WIDTH_20) \ + CHANDEF(chandef_5ghz_40mhz, chan_5ghz, 5210, \ + NL80211_CHAN_WIDTH_40) \ + CHANDEF(chandef_5ghz_80mhz, chan_5ghz, 5210, \ + NL80211_CHAN_WIDTH_80) \ + CHANDEF(chandef_5ghz_160mhz, chan_5ghz, 5250, \ + NL80211_CHAN_WIDTH_160) \ + CHANDEF(chandef_5ghz_120_40mhz, chan_5ghz_120, 5610, \ + NL80211_CHAN_WIDTH_40) \ + CHANDEF(chandef_6ghz_20mhz, chan_6ghz, 6115, \ + NL80211_CHAN_WIDTH_20) \ + CHANDEF(chandef_6ghz_40mhz, chan_6ghz, 6125, \ + NL80211_CHAN_WIDTH_40) \ + CHANDEF(chandef_6ghz_80mhz, chan_6ghz, 6145, \ + NL80211_CHAN_WIDTH_80) \ + CHANDEF(chandef_6ghz_160mhz, chan_6ghz, 6185, \ + NL80211_CHAN_WIDTH_160) \ + CHANDEF(chandef_6ghz_320mhz, chan_6ghz, 6105, \ + 
NL80211_CHAN_WIDTH_320) \ + CHANDEF(chandef_6ghz_221_160mhz, chan_6ghz_221, 6985, \ + NL80211_CHAN_WIDTH_160) \ + /* Feel free to add more */ -CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20); -CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40); -CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160); -/* Feel free to add more */ - -//struct cfg80211_chan_def; +#define CHANDEF(_name, _channel, _freq1, _width) \ +__maybe_unused static const struct cfg80211_chan_def _name = { \ + .chan = &(_channel), \ + .center_freq1 = (_freq1), \ + .width = (_width), \ +}; +CHANDEF_LIST +#undef CHANDEF struct ieee80211_chanctx_conf * -iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def); - -static inline struct ieee80211_chanctx_conf * -iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width) -{ - struct cfg80211_chan_def chandef; - - switch (band) { - case NL80211_BAND_2GHZ: - chandef = chandef_2ghz; - break; - case NL80211_BAND_5GHZ: - chandef = chandef_5ghz; - break; - default: - case NL80211_BAND_6GHZ: - chandef = chandef_6ghz; - break; - } - - chandef.width = width; - - return iwlmld_kunit_add_chanctx_from_def(&chandef); -} +iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def); void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif, struct ieee80211_bss_conf *link, diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c index 1909953a9be9..f8a8c35066be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c @@ -13,6 +13,9 @@ #include "mld.h" #include "hcmd.h" +#define IWL_MLD_NUM_CTDP_STEPS 20 +#define IWL_MLD_MIN_CTDP_BUDGET_MW 150 + #define IWL_MLD_CT_KILL_DURATION (5 * HZ) void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld, @@ -116,8 +119,8 @@ free_resp: static int compare_temps(const void *a, const void *b) { - return ((s16)le16_to_cpu(*(__le16 *)a) - - (s16)le16_to_cpu(*(__le16 *)b)); + return ((s16)le16_to_cpu(*(const __le16 *)a) - + (s16)le16_to_cpu(*(const __le16 *)b)); } struct iwl_trip_walk_data { @@ -272,43 +275,27 @@ static void iwl_mld_thermal_zone_register(struct iwl_mld *mld) } } -/* budget in mWatt */ -static const u32 iwl_mld_cdev_budgets[] = { - 2400, /* cooling state 0 */ - 2000, /* cooling state 1 */ - 1800, /* cooling state 2 */ - 1600, /* cooling state 3 */ - 1400, /* cooling state 4 */ - 1200, /* cooling state 5 */ - 1000, /* cooling state 6 */ - 900, /* cooling state 7 */ - 800, /* cooling state 8 */ - 700, /* cooling state 9 */ - 650, /* cooling state 10 */ - 600, /* cooling state 11 */ - 550, /* cooling state 12 */ - 500, /* cooling state 13 */ - 450, /* cooling state 14 */ - 400, /* cooling state 15 */ - 350, /* cooling state 16 */ - 300, /* cooling state 17 */ - 250, /* cooling state 18 */ - 200, /* cooling state 19 */ - 150, /* cooling state 20 */ -}; - int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state, enum iwl_ctdp_cmd_operation op) { struct iwl_ctdp_cmd cmd = { .operation = cpu_to_le32(op), - .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]), .window_size = 0, }; + u32 budget; int ret; lockdep_assert_wiphy(mld->wiphy); + /* Do a linear scale from IWL_MLD_MIN_CTDP_BUDGET_MW to the configured + * maximum in the predefined number of steps. 
+ */ + budget = ((mld->power_budget_mw - IWL_MLD_MIN_CTDP_BUDGET_MW) * + (IWL_MLD_NUM_CTDP_STEPS - 1 - state)) / + (IWL_MLD_NUM_CTDP_STEPS - 1) + + IWL_MLD_MIN_CTDP_BUDGET_MW; + cmd.budget = cpu_to_le32(budget); + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), &cmd); @@ -326,7 +313,7 @@ int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state, static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { - *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1; + *state = IWL_MLD_NUM_CTDP_STEPS - 1; return 0; } @@ -354,7 +341,7 @@ static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev, goto unlock; } - if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) { + if (new_state >= IWL_MLD_NUM_CTDP_STEPS) { ret = -EINVAL; goto unlock; } @@ -417,10 +404,50 @@ static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld) } #endif /* CONFIG_THERMAL */ +static u32 iwl_mld_ctdp_get_max_budget(struct iwl_mld *mld) +{ + u64 bios_power_budget = 0; + u32 default_power_budget; + + switch (CSR_HW_RFID_TYPE(mld->trans->info.hw_rf_id)) { + case IWL_CFG_RF_TYPE_GF: + /* dual-radio devices have a higher budget */ + if (CSR_HW_RFID_IS_CDB(mld->trans->info.hw_rf_id)) + default_power_budget = 5200; + else + default_power_budget = 2880; + break; + case IWL_CFG_RF_TYPE_FM: + default_power_budget = 3450; + break; + case IWL_CFG_RF_TYPE_WH: + case IWL_CFG_RF_TYPE_PE: + default: + default_power_budget = 5550; + break; + } + + iwl_bios_get_pwr_limit(&mld->fwrt, &bios_power_budget); + + /* 32bit in UEFI, 16bit in ACPI; use BIOS value if it is in range */ + if (bios_power_budget && + bios_power_budget != 0xffff && bios_power_budget != 0xffffffff && + bios_power_budget >= IWL_MLD_MIN_CTDP_BUDGET_MW && + bios_power_budget <= default_power_budget) + return (u32)bios_power_budget; + + return default_power_budget; +} + void iwl_mld_thermal_initialize(struct iwl_mld *mld) { + lockdep_assert_not_held(&mld->wiphy->mtx); + wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill); + mld->power_budget_mw = iwl_mld_ctdp_get_max_budget(mld); + IWL_DEBUG_TEMP(mld, "cTDP power budget: %d mW\n", mld->power_budget_mw); + #ifdef CONFIG_THERMAL iwl_mld_cooling_device_register(mld); iwl_mld_thermal_zone_register(mld); @@ -429,7 +456,9 @@ void iwl_mld_thermal_initialize(struct iwl_mld *mld) void iwl_mld_thermal_exit(struct iwl_mld *mld) { + wiphy_lock(mld->wiphy); wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk); + wiphy_unlock(mld->wiphy); #ifdef CONFIG_THERMAL iwl_mld_cooling_device_unregister(mld); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c index f054cc921d9d..a9ca92c0455e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c @@ -44,7 +44,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld, u16 flags = 0; /* STBC flags */ - if (mld->cfg->ht_params->stbc && + if (mld->cfg->ht_params.stbc && (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) { if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) @@ -56,7 +56,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld, } /* LDPC */ - if (mld->cfg->ht_params->ldpc && + if (mld->cfg->ht_params.ldpc && ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; @@ -658,7 +658,9 @@ void iwl_mld_handle_tlc_notif(struct iwl_mld *mld, if (WARN_ON(!mld_link_sta)) 
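The fixed 21-entry budget table is gone: iwl_mld_config_ctdp() now scales linearly from the per-device maximum (chosen by RF type and optionally capped by a sane BIOS limit) down to IWL_MLD_MIN_CTDP_BUDGET_MW across IWL_MLD_NUM_CTDP_STEPS cooling states. A sketch of that computation with the constants from the hunk; the 3450 mW default and the BIOS value are example inputs:

#include <stdint.h>
#include <stdio.h>

#define NUM_CTDP_STEPS      20
#define MIN_CTDP_BUDGET_MW  150

static uint32_t ctdp_budget_mw(uint32_t max_budget_mw, unsigned int state)
{
        return (max_budget_mw - MIN_CTDP_BUDGET_MW) *
               (NUM_CTDP_STEPS - 1 - state) / (NUM_CTDP_STEPS - 1) +
               MIN_CTDP_BUDGET_MW;
}

static uint32_t pick_max_budget(uint32_t default_mw, uint64_t bios_mw)
{
        /* use the BIOS value only if it is sane and within the device limit */
        if (bios_mw && bios_mw != 0xffff && bios_mw != 0xffffffff &&
            bios_mw >= MIN_CTDP_BUDGET_MW && bios_mw <= default_mw)
                return (uint32_t)bios_mw;
        return default_mw;
}

int main(void)
{
        /* e.g. an FM radio defaults to 3450 mW; the BIOS asks for 3000 mW */
        uint32_t max = pick_max_budget(3450, 3000);

        /* state 0 is the full budget, state 19 bottoms out at 150 mW */
        for (unsigned int state = 0; state < NUM_CTDP_STEPS; state += 19)
                printf("state %2u -> %u mW\n", state, ctdp_budget_mw(max, state));
        return 0;
}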
return; - mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate); + mld_link_sta->last_rate_n_flags = + iwl_v3_rate_from_v2_v3(notif->rate, + mld->fw_rates_ver_3); rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), mld_link_sta->last_rate_n_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c index 543abe72e465..3b4b575aadaa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c @@ -76,14 +76,14 @@ static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq) */ unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ? IWL_WATCHDOG_DISABLED : - mld->trans->trans_cfg->base_params->wd_timeout; + mld->trans->mac_cfg->base->wd_timeout; int queue, size; lockdep_assert_wiphy(mld->wiphy); if (tid == IWL_MGMT_TID) size = max_t(u32, IWL_MGMT_QUEUE_SIZE, - mld->trans->cfg->min_txq_size); + mld->trans->mac_cfg->base->min_txq_size); else size = iwl_mld_get_queue_size(mld, txq); @@ -391,9 +391,9 @@ static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld, /* Set CCK or OFDM flag */ if (rate_idx <= IWL_LAST_CCK_RATE) - rate_flags |= RATE_MCS_CCK_MSK; + rate_flags |= RATE_MCS_MOD_TYPE_CCK; else - rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; + rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM; /* Legacy rates are indexed: * 0 - 3 for CCK and 0 - 7 for OFDM @@ -425,47 +425,40 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld, struct ieee80211_tx_rate *rate = &info->control.rates[0]; u32 result; - /* we only care about legacy/HT/VHT so far, so we can - * build in v1 and use iwl_new_rate_from_v1() - * FIXME: in newer devices we only support the new rates, build - * the rate_n_flags in the new format here instead of using v1 and - * converting it. 
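Editor's note on the rate-format hunks above: the driver now keeps rate_n_flags in the version-3 layout internally and only translates at the firmware boundary, keyed on fw_rates_ver_3. The sketch below merely restates that convention; it is not standalone code, the wrapper names are mine, and only the conversion helpers actually visible in the hunks are used.

/* RX/notification direction (e.g. the TLC_MNG_UPDATE_NOTIF handler above):
 * firmware-version layout -> driver-internal v3 layout.
 */
static inline u32 example_rate_from_fw(struct iwl_mld *mld, __le32 fw_rate)
{
	return iwl_v3_rate_from_v2_v3(fw_rate, mld->fw_rates_ver_3);
}

/* TX direction (e.g. iwl_mld_get_tx_rate_n_flags() above):
 * driver-internal v3 layout -> whatever this firmware expects.
 */
static inline __le32 example_rate_to_fw(struct iwl_mld *mld, u32 rate_n_flags)
{
	return iwl_v3_rate_to_v2_v3(rate_n_flags, mld->fw_rates_ver_3);
}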
- */ - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { u8 mcs = ieee80211_rate_get_vht_mcs(rate); u8 nss = ieee80211_rate_get_vht_nss(rate); - result = RATE_MCS_VHT_MSK_V1; - result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK); + result = RATE_MCS_MOD_TYPE_VHT; + result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK); result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - result |= RATE_MCS_SGI_MSK_V1; + result |= RATE_MCS_SGI_MSK; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_40; else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_80; else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) - result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); - - result = iwl_new_rate_from_v1(result); + result |= RATE_MCS_CHAN_WIDTH_160; } else if (rate->flags & IEEE80211_TX_RC_MCS) { - result = RATE_MCS_HT_MSK_V1; - result |= u32_encode_bits(rate->idx, - RATE_HT_MCS_RATE_CODE_MSK_V1 | - RATE_HT_MCS_NSS_MSK_V1); + /* only MCS 0-15 are supported */ + u8 mcs = rate->idx & 7; + u8 nss = rate->idx > 7; + + result = RATE_MCS_MOD_TYPE_HT; + result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK); + result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - result |= RATE_MCS_SGI_MSK_V1; + result |= RATE_MCS_SGI_MSK; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_40; if (info->flags & IEEE80211_TX_CTL_LDPC) - result |= RATE_MCS_LDPC_MSK_V1; + result |= RATE_MCS_LDPC_MSK; if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) result |= RATE_MCS_STBC_MSK; - - result = iwl_new_rate_from_v1(result); } else { result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx); } @@ -479,19 +472,23 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld, return result; } -static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, __le16 fc) +static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) { + u32 rate; + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) - return iwl_mld_get_inject_tx_rate(mld, info, sta, fc); + rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc); + else + rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) | + iwl_mld_get_tx_ant(mld, info, sta, fc); - return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) | - iwl_mld_get_tx_ant(mld, info, sta, fc); + return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3); } static void -iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd, +iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb, bool amsdu) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -537,11 +534,11 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb, struct ieee80211_hdr *hdr = (void *)skb->data; struct iwl_mld_sta *mld_sta = sta ? 
iwl_mld_sta_from_mac80211(sta) : NULL; - struct iwl_tx_cmd_gen3 *tx_cmd; + struct iwl_tx_cmd *tx_cmd; bool amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); - u32 rate_n_flags = 0; + __le32 rate_n_flags = 0; u16 flags = 0; dev_tx_cmd->hdr.cmd = TX_CMD; @@ -576,7 +573,7 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb, tx_cmd->flags = cpu_to_le16(flags); - tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + tx_cmd->rate_n_flags = rate_n_flags; } /* Caller of this need to check that info->control.vif is not NULL */ @@ -641,16 +638,36 @@ iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq, case NL80211_IFTYPE_P2P_DEVICE: mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); - if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) { - IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n"); + if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC && + mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) { + IWL_DEBUG_DROP(mld, + "Drop tx outside ROC with activity %d\n", + mld_vif->roc_activity); return IWL_MLD_INVALID_DROP_TX; } WARN_ON(!ieee80211_is_mgmt(fc)); - return mld_vif->deflink.aux_sta.queue_id; + return mld_vif->aux_sta.queue_id; + case NL80211_IFTYPE_MONITOR: + mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); + return mld_vif->deflink.mon_sta.queue_id; + case NL80211_IFTYPE_STATION: + mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); + + if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) { + IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n"); + return IWL_MLD_INVALID_DROP_TX; + } + + if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) { + IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n"); + return IWL_MLD_INVALID_DROP_TX; + } + + WARN_ON(!ieee80211_is_mgmt(fc)); + return mld_vif->aux_sta.queue_id; default: - /* TODO: consider monitor (task=monitor) */ WARN_ONCE(1, "Unsupported vif type\n"); break; } @@ -831,7 +848,7 @@ static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb, * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > - mld->trans->max_skb_frags) + mld->trans->info.max_skb_frags) num_subframes = 1; if (num_subframes > 1) @@ -977,11 +994,14 @@ void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq) rcu_read_unlock(); } -static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags, +static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld, + __le32 rate_n_flags_fw, struct ieee80211_tx_info *info) { enum nl80211_band band = info->band; struct ieee80211_tx_rate *tx_rate = &info->status.rates[0]; + u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw, + mld->fw_rates_ver_3); u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK; u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; @@ -1006,18 +1026,19 @@ static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags, } switch (format) { - case RATE_MCS_HT_MSK: + case RATE_MCS_MOD_TYPE_HT: tx_rate->flags |= IEEE80211_TX_RC_MCS; tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags); break; - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: ieee80211_rate_set_vht(tx_rate, rate_n_flags & RATE_MCS_CODE_MSK, - FIELD_GET(RATE_MCS_NSS_MSK, - rate_n_flags) + 1); + u32_get_bits(rate_n_flags, + RATE_MCS_NSS_MSK) + 1); tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: + case RATE_MCS_MOD_TYPE_EHT: /* mac80211 cannot do this without ieee80211_tx_status_ext() * but it only matters for 
radiotap */ @@ -1111,8 +1132,7 @@ void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld, iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL); } - iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate), - info); + iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info); if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1))) ieee80211_tx_status_skb(mld->hw, skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 21641d41a958..13cdc077d8d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include <linux/ieee80211.h> @@ -181,7 +181,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, struct iwl_mvm_sta *mvmsta; u32 value; - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return 0; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); @@ -569,7 +569,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { rcu_read_unlock(); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 3e8b7168af01..507c03198c92 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -300,7 +300,7 @@ static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, - mvm->trans->num_rx_queues); + mvm->trans->info.num_rxqs); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | @@ -421,7 +421,7 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, - mvm->trans->num_rx_queues); + mvm->trans->info.num_rxqs); rsc[i] = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | @@ -1266,7 +1266,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, }; struct iwl_host_cmd d3_cfg_cmd = { .id = D3_CONFIG_CMD, - .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, + .flags = CMD_WANT_SKB, .data[0] = &d3_cfg_cmd_data, .len[0] = sizeof(d3_cfg_cmd_data), }; @@ -1370,11 +1370,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, * recording before entering D3. In later devices the FW stops the * recording automatically. 
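Editor's worked example for the HT branch of iwl_mld_get_inject_tx_rate() above: mac80211 hands over a flat HT MCS index (0-15 for up to two streams), and the new rate_n_flags layout wants the per-stream MCS and the stream count in separate fields, hence mcs = idx & 7 and nss = idx > 7. The standalone program below only demonstrates that arithmetic; the field-name comments refer to the masks used in the hunk.

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx <= 15; idx++) {
		int mcs = idx & 7;	/* per-stream MCS, placed in RATE_MCS_CODE_MSK */
		int nss = idx > 7;	/* 0 for one stream, 1 for two, placed in RATE_MCS_NSS_MSK */

		printf("HT MCS %2d -> code %d, %d spatial stream(s)\n",
		       idx, mcs, nss + 1);
	}
	return 0;
}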
*/ - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; - /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) @@ -1686,7 +1684,7 @@ static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { int i; - for (i = 1; i < mvm->trans->num_rx_queues; i++) + for (i = 1; i < mvm->trans->info.num_rxqs; i++) memcpy(ptk_pn->q[i].pn[tid], status->ptk.aes.seq[tid].ccmp.pn, IEEE80211_CCMP_PN_LEN); @@ -2825,7 +2823,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, status->qos_seq_ctr[i] + 0x10; } - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { i = mvm->offload_tid; iwl_trans_set_q_ptrs(mvm->trans, mvm_ap_sta->tid_data[i].txq_id, @@ -3407,9 +3405,9 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) int ret; enum iwl_d3_status d3_status; struct iwl_host_cmd cmd = { - .id = D0I3_END_CMD, - .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, - }; + .id = D0I3_END_CMD, + .flags = CMD_WANT_SKB, + }; bool reset = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); @@ -3427,7 +3425,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) * AX210 and above don't need the command since they have * the doorbell interrupt. */ - if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 && + if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_22000 && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) { ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) @@ -3564,9 +3562,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN); - /* after the successful handshake, we're out of D3 */ - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; - /* when reset is required we can't send these following commands */ if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) goto query_wakeup_reasons; @@ -3639,9 +3634,6 @@ out: */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); - /* regardless of what happened, we're now out of D3 */ - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; - return 1; } @@ -3679,8 +3671,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); WARN_ON(iwl_mvm_power_update_device(mvm)); - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; - ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3, + ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0, sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data); if (ret) IWL_ERR(mvm, @@ -3735,7 +3726,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; mvm->fast_resume = false; return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 671d3f8d79c1..86a87ea89916 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1277,7 +1277,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, return -EIO; /* supporting only MQ RX */ - if (!mvm->trans->trans_cfg->mq_rx_supported) + if (!mvm->trans->mac_cfg->mq_rx_supported) return 
-EOPNOTSUPP; rxb._page = alloc_pages(GFP_ATOMIC, 0); @@ -1468,7 +1468,7 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000) return -EOPNOTSUPP; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 94b08bb6fd4f..819e3228462a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -29,8 +29,8 @@ #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) struct iwl_mvm_alive_data { + __le32 sku_id[3]; bool valid; - u32 scd_base_addr; }; static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) @@ -57,13 +57,13 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), }; - if (mvm->trans->num_rx_queues == 1) + if (mvm->trans->info.num_rxqs == 1) return 0; /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = - 1 + (i % (mvm->trans->num_rx_queues - 1)); + 1 + (i % (mvm->trans->info.num_rxqs - 1)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); @@ -114,7 +114,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, u32 i; - if (version == 6) { + if (version >= 6) { struct iwl_alive_ntf_v6 *palive; if (pkt_len < sizeof(*palive)) @@ -157,6 +157,17 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, } } } + + if (version >= 8) { + const struct iwl_alive_ntf *palive_v8 = + (void *)pkt->data; + + if (pkt_len < sizeof(*palive_v8)) + return false; + + IWL_DEBUG_FW(mvm, "platform id: 0x%llx\n", + palive_v8->platform_id); + } } if (version >= 5) { @@ -171,14 +182,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); - mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); - mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); - mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); + BUILD_BUG_ON(sizeof(palive->sku_id.data) != + sizeof(alive_data->sku_id)); + memcpy(alive_data->sku_id, palive->sku_id.data, + sizeof(palive->sku_id.data)); IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", - mvm->trans->sku_id[0], - mvm->trans->sku_id[1], - mvm->trans->sku_id[2]); + le32_to_cpu(alive_data->sku_id[0]), + le32_to_cpu(alive_data->sku_id[1]), + le32_to_cpu(alive_data->sku_id[2])); } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { struct iwl_alive_ntf_v4 *palive; @@ -221,7 +233,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, if (umac_error_table) { if (umac_error_table >= - mvm->trans->cfg->min_umac_error_event_table) { + mvm->trans->mac_cfg->base->min_umac_error_event_table) { iwl_fw_umac_set_alive_err_table(mvm->trans, umac_error_table); } else { @@ -233,7 +245,6 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, } } - alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mvm, @@ -282,7 +293,7 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) struct iwl_trans *trans = mvm->trans; - enum iwl_device_family device_family = 
trans->trans_cfg->device_family; + enum iwl_device_family device_family = trans->mac_cfg->device_family; if (device_family < IWL_DEVICE_FAMILY_8000) return; @@ -304,7 +315,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, { struct iwl_notification_wait alive_wait; struct iwl_mvm_alive_data alive_data = {}; - const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; @@ -317,11 +327,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) - fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); - else - fw = iwl_get_ucode_image(mvm->fw, ucode_type); - if (WARN_ON(!fw)) - return -EINVAL; + ucode_type = IWL_UCODE_REGULAR_USNIFFER; iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); @@ -334,7 +340,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, * For the unified firmware case, the ucode_type is not * INIT, but we still need to run it. */ - ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); + ret = iwl_trans_start_fw(mvm->trans, mvm->fw, ucode_type, + run_in_rfkill); if (ret) { iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); @@ -348,7 +355,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); - if (mvm->trans->trans_cfg->device_family == + if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { /* print these registers regardless of alive fail/success */ IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n", @@ -365,14 +372,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_trans *trans = mvm->trans; /* SecBoot info */ - if (trans->trans_cfg->device_family >= + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); - } else if (trans->trans_cfg->device_family >= + } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", @@ -383,7 +390,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_mvm_print_pd_notification(mvm); /* LMAC/UMAC PC info */ - if (trans->trans_cfg->device_family >= + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { pc_data = trans->dbg.pc_data; for (count = 0; count < trans->dbg.num_pc; @@ -391,7 +398,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, IWL_ERR(mvm, "%s: 0x%x\n", pc_data->pc_name, pc_data->pc_address); - } else if (trans->trans_cfg->device_family >= + } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { IWL_ERR(mvm, "UMAC PC: 0x%x\n", iwl_read_umac_prph(trans, @@ -422,10 +429,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, /* if reached this point, Alive notification was received */ iwl_mei_alive_notif(true); - iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + iwl_trans_fw_alive(mvm->trans); ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, - &mvm->fw->ucode_capa); + &mvm->fw->ucode_capa, alive_data.sku_id); if (ret) { IWL_ERR(mvm, "Timeout waiting for PNVM load!\n"); iwl_fw_set_current_image(&mvm->fwrt, 
old_type); @@ -490,7 +497,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); return; } @@ -504,11 +511,10 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) return; } - ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); - if (ret < 0) { - IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret); + iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); + + if (!mvm->fwrt.uats_valid) return; - } ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) @@ -578,7 +584,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) /* set flags extra PHY configuration flags from the device's cfg */ phy_cfg_cmd.phy_cfg |= - cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); + cpu_to_le32(mvm->trans->mac_cfg->extra_phy_cfg_flags); phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; @@ -617,7 +623,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) mvm->rfkill_safe_init_done = false; - if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); /* if needed, we'll reset this on our way out later */ mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM; @@ -751,7 +757,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) goto remove_notif; } - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) { + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto remove_notif; @@ -1265,7 +1271,7 @@ void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm) } } - iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->fwrt.phy_filters); + iwl_acpi_get_phy_filters(&mvm->fwrt); if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid)) IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 1ea7c44250d4..c3cc1ea3ccc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2025 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #include <linux/leds.h> @@ -102,7 +102,7 @@ void iwl_mvm_leds_sync(struct iwl_mvm *mvm) * if we control through the register, we're doing it * even when the firmware isn't up, so no need to sync */ - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) return; iwl_mvm_led_set(mvm, mvm->led.brightness > 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index bec18d197f31..9098a36530cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -938,12 +938,19 @@ u8 
iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) { - u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10; + u16 flags, cck_flag; - if (rate_idx <= IWL_FIRST_CCK_RATE) - flags |= is_new_rate ? IWL_MAC_BEACON_CCK - : IWL_MAC_BEACON_CCK_V1; + if (is_new_rate) { + flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); + cck_flag = IWL_MAC_BEACON_CCK; + } else { + cck_flag = IWL_MAC_BEACON_CCK_V1; + flags = iwl_fw_rate_idx_to_plcp(rate_idx); + } + + if (rate_idx <= IWL_LAST_CCK_RATE) + flags |= cck_flag; return flags; } @@ -969,7 +976,7 @@ u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm, static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, - struct iwl_tx_cmd *tx) + struct iwl_tx_cmd_v6 *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5d8f50a455d7..0f056a6641bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -146,7 +146,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, MCC_UPDATE_CMD, 0); IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); - regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, + regd = iwl_parse_nvm_mcc_info(mvm->trans, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), @@ -311,8 +311,8 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* This has been tested on those devices only */ - if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 && - mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000) + if (mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000 && + mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000) return -EOPNOTSUPP; if (!mvm->nvm_data) @@ -391,7 +391,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) * for older devices. We also don't see this issue on any newer * devices. 
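Editor's note on the beacon-flags rework above: the CCK test is widened from rate_idx <= IWL_FIRST_CCK_RATE to rate_idx <= IWL_LAST_CCK_RATE. Assuming the usual iwlwifi legacy rate table, where CCK (1, 2, 5.5, 11 Mbps) occupies the first four indices and OFDM starts at IWL_FIRST_OFDM_RATE, the old comparison only matched the 1 Mbps entry. A tiny standalone illustration follows; the index values are illustrative, not the driver's enums.

#include <stdio.h>

/* illustrative indices only, mirroring the "CCK first, then OFDM" ordering */
enum { R_1M = 0, R_2M, R_5M, R_11M, R_6M /* first OFDM */, R_9M };
#define FIRST_CCK_RATE	R_1M
#define LAST_CCK_RATE	R_11M

int main(void)
{
	for (int idx = R_1M; idx <= R_9M; idx++)
		printf("idx %d: old cck=%d new cck=%d\n", idx,
		       idx <= FIRST_CCK_RATE,	/* only 1 Mbps */
		       idx <= LAST_CCK_RATE);	/* all four CCK rates */
	return 0;
}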
*/ - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); @@ -402,7 +402,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) /* We want to use the mac80211's reorder buffer for 9000 */ if (iwl_mvm_has_new_rx_api(mvm) && - mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000) + mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, @@ -417,10 +417,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return -EINVAL; } - if (mvm->trans->num_rx_queues > 1) + if (mvm->trans->info.num_rxqs > 1) ieee80211_hw_set(hw, USES_RSS); - if (mvm->trans->max_skb_frags) + if (mvm->trans->info.max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->queues = IEEE80211_NUM_ACS; @@ -441,7 +441,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; - hw->max_tx_fragments = mvm->trans->max_skb_frags; + hw->max_tx_fragments = mvm->trans->info.max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); @@ -544,7 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT); @@ -610,7 +610,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[NL80211_BAND_6GHZ] = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; - hw->wiphy->hw_version = mvm->trans->hw_id; + hw->wiphy->hw_version = mvm->trans->info.hw_id; if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -731,8 +731,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; - ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ); - #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && device_can_wakeup(mvm->trans->dev)) { @@ -771,7 +769,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } - hw->netdev_features |= mvm->cfg->features; + hw->netdev_features |= mvm->trans->mac_cfg->base->features; if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; @@ -1379,7 +1377,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend) iwl_mvm_rm_aux_sta(mvm); if (suspend && - mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { iwl_mvm_fast_suspend(mvm); /* From this point on, we won't touch the device */ iwl_mvm_mei_device_state(mvm, false); @@ -1988,15 +1986,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, * interface is be handled as part of the stop_ap flow. 
*/ if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { -#ifdef CONFIG_NL80211_TESTMODE - if (vif == mvm->noa_vif) { - mvm->noa_vif = NULL; - mvm->noa_duration = 0; - } -#endif + vif->type == NL80211_IFTYPE_ADHOC) goto out; - } iwl_mvm_power_update_mac(mvm); @@ -3059,7 +3050,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, * context. For the newer, the beacon is a resource that belongs to a * MAC, so need to send beacon template after adding the mac. */ - if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_22000) { /* Add the mac context */ ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) @@ -4105,7 +4096,8 @@ void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (!iwl_mvm_has_rlc_offload(mvm)) + if (!iwl_mvm_has_rlc_offload(mvm) || + iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2) return; mvmvif->ps_disabled = !vif->cfg.ps; @@ -4395,7 +4387,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: - if (!mvm->trans->trans_cfg->gen2) { + if (!mvm->trans->mac_cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { @@ -4512,7 +4504,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, - mvm->trans->num_rx_queues), + mvm->trans->info.num_rxqs), GFP_KERNEL); if (!ptk_pn) { ret = -ENOMEM; @@ -4521,7 +4513,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { ieee80211_get_key_rx_seq(key, tid, &seq); - for (q = 0; q < mvm->trans->num_rx_queues; q++) + for (q = 0; q < mvm->trans->info.num_rxqs; q++) memcpy(ptk_pn->q[q].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); @@ -5525,70 +5517,6 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, &mvm_sta->vif->bss_conf); } -#ifdef CONFIG_NL80211_TESTMODE -static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { - [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, - [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, - [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, -}; - -static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - void *data, int len) -{ - struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; - int err; - u32 noa_duration; - - err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, - iwl_mvm_tm_policy, NULL); - if (err) - return err; - - if (!tb[IWL_MVM_TM_ATTR_CMD]) - return -EINVAL; - - switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { - case IWL_MVM_TM_CMD_SET_NOA: - if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || - !vif->bss_conf.enable_beacon || - !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) - return -EINVAL; - - noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); - if (noa_duration >= vif->bss_conf.beacon_int) - return -EINVAL; - - mvm->noa_duration = noa_duration; - mvm->noa_vif = vif; - - return iwl_mvm_update_quotas(mvm, true, NULL); - case IWL_MVM_TM_CMD_SET_BEACON_FILTER: - /* must be associated client vif - ignore authorized */ - if (!vif || vif->type != NL80211_IFTYPE_STATION || - !vif->cfg.assoc || !vif->bss_conf.dtim_period || - !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) - return -EINVAL; - - if 
(nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) - return iwl_mvm_enable_beacon_filter(mvm, vif); - return iwl_mvm_disable_beacon_filter(mvm, vif); - } - - return -EOPNOTSUPP; -} - -int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - void *data, int len) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - - guard(mvm)(mvm); - return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); -} -#endif - void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { @@ -6140,12 +6068,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) break; } - if (format == RATE_MCS_CCK_MSK || - format == RATE_MCS_LEGACY_OFDM_MSK) { + if (format == RATE_MCS_MOD_TYPE_CCK || + format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) { int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); /* add the offset needed to get to the legacy ofdm indices */ - if (format == RATE_MCS_LEGACY_OFDM_MSK) + if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) rate += IWL_FIRST_OFDM_RATE; switch (rate) { @@ -6190,7 +6118,7 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; - rinfo->mcs = format == RATE_MCS_HT_MSK ? + rinfo->mcs = format == RATE_MCS_MOD_TYPE_HT ? RATE_HT_MCS_INDEX(rate_n_flags) : u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); @@ -6198,11 +6126,11 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; switch (format) { - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_EHT: /* TODO: GI/LTF/RU. How does the firmware encode them? */ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; @@ -6243,10 +6171,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; break; - case RATE_MCS_HT_MSK: + case RATE_MCS_MOD_TYPE_HT: rinfo->flags |= RATE_INFO_FLAGS_MCS; break; - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; break; } @@ -6440,7 +6368,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, }; int ret; - cmd->rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1); + cmd->rxq_mask = cpu_to_le32(BIT(mvm->trans->info.num_rxqs) - 1); cmd->count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + size); notif->type = type; @@ -6455,7 +6383,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (sync) { notif->cookie = mvm->queue_sync_cookie; - mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; + mvm->queue_sync_state = (1 << mvm->trans->info.num_rxqs) - 1; } ret = iwl_mvm_send_cmd(mvm, &hcmd); @@ -6648,8 +6576,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .sync_rx_queues = iwl_mvm_sync_rx_queues, - CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) - #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c index bb7851042177..81ca9ff67be9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c @@ -1,17 +1,25 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022 - 2024 Intel Corporation + * Copyright (C) 2022 - 2025 Intel 
Corporation */ #include "mvm.h" static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mac_config_cmd *cmd) + struct iwl_mac_config_cmd *cmd, + int cmd_ver) { - if (vif->type == NL80211_IFTYPE_AP) - cmd->he_ap_support = cpu_to_le16(1); - else - cmd->he_support = cpu_to_le16(1); + if (vif->type == NL80211_IFTYPE_AP) { + if (cmd_ver == 2) + cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1); + else + cmd->wifi_gen.he_ap_support = 1; + } else { + if (cmd_ver == 2) + cmd->wifi_gen_v2.he_support = cpu_to_le16(1); + else + cmd->wifi_gen.he_support = 1; + } } static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, @@ -22,6 +30,12 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_bss_conf *link_conf; unsigned int link_id; + int cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, + MAC_CONFIG_CMD), 0); + + if (WARN_ON(cmd_ver < 1 && cmd_ver > 3)) + return; cmd->id_and_color = cpu_to_le32(mvmvif->id); cmd->action = cpu_to_le32(action); @@ -30,8 +44,8 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN); - cmd->he_support = 0; - cmd->eht_support = 0; + cmd->wifi_gen_v2.he_support = 0; + cmd->wifi_gen_v2.eht_support = 0; /* should be set by specific context type handler */ cmd->filter_flags = 0; @@ -51,8 +65,11 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, * and enable both when we have MLO. */ if (ieee80211_vif_is_mld(vif)) { - iwl_mvm_mld_set_he_support(mvm, vif, cmd); - cmd->eht_support = cpu_to_le32(1); + iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver); + if (cmd_ver == 2) + cmd->wifi_gen_v2.eht_support = cpu_to_le32(1); + else + cmd->wifi_gen.eht_support = 1; return; } @@ -63,16 +80,19 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, continue; if (link_conf->he_support) - iwl_mvm_mld_set_he_support(mvm, vif, cmd); + iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver); - /* it's not reasonable to have EHT without HE and FW API doesn't + /* It's not reasonable to have EHT without HE and FW API doesn't * support it. Ignore EHT in this case. */ if (!link_conf->he_support && link_conf->eht_support) continue; if (link_conf->eht_support) { - cmd->eht_support = cpu_to_le32(1); + if (cmd_ver == 2) + cmd->wifi_gen_v2.eht_support = cpu_to_le32(1); + else + cmd->wifi_gen.eht_support = 1; break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 78d7153a0cfc..bf24f8cb673e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -150,19 +150,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_vif_dbgfs_rm_link(mvm, vif); - /* For AP/GO interface, the tear down of the resources allocated to the - * interface is be handled as part of the stop_ap flow. 
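Editor's toy for iwl_mvm_mld_set_he_support() above: MAC_CONFIG_CMD version 2 carries 16/32-bit little-endian support fields (wifi_gen_v2), while version 3 shrinks them to single bytes (wifi_gen), which is why the v2 path goes through cpu_to_le16()/cpu_to_le32() and the v3 path assigns a plain 1. The standalone program below mirrors only that split; the struct and member names are illustrative, not the real iwl_mac_config_cmd layout.

#include <stdint.h>
#include <stdio.h>

struct toy_wifi_gen_v2 { uint16_t he_support; uint32_t eht_support; };	/* multi-byte, LE in fw */
struct toy_wifi_gen    { uint8_t  he_support; uint8_t  eht_support; };	/* plain bytes */

struct toy_mac_cfg {
	union {	/* layout picked by the looked-up command version */
		struct toy_wifi_gen_v2 v2;
		struct toy_wifi_gen v3;
	};
};

static void toy_set_he_eht(struct toy_mac_cfg *cmd, int cmd_ver)
{
	if (cmd_ver == 2) {
		cmd->v2.he_support = 1;		/* cpu_to_le16(1) in the driver */
		cmd->v2.eht_support = 1;	/* cpu_to_le32(1) in the driver */
	} else {
		cmd->v3.he_support = 1;		/* single byte, no byte swapping */
		cmd->v3.eht_support = 1;
	}
}

int main(void)
{
	struct toy_mac_cfg cmd;

	toy_set_he_eht(&cmd, 3);
	printf("v3 he=%u eht=%u\n", cmd.v3.he_support, cmd.v3.eht_support);
	return 0;
}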
- */ - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { -#ifdef CONFIG_NL80211_TESTMODE - if (vif == mvm->noa_vif) { - mvm->noa_vif = NULL; - mvm->noa_duration = 0; - } -#endif - } - iwl_mvm_power_update_mac(mvm); /* Before the interface removal, mac80211 would cancel the ROC, and the @@ -1403,8 +1390,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .sync_rx_queues = iwl_mvm_sync_rx_queues, - CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) - #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 9dd670041137..e1010521c3ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2024 Intel Corporation + * Copyright (C) 2022-2025 Intel Corporation */ #include "mvm.h" #include "time-sync.h" @@ -48,9 +48,13 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, struct iwl_sta_cfg_cmd *cmd) { + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD); + int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ? + sizeof(*cmd) : + sizeof(struct iwl_sta_cfg_cmd_v1); int ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), - 0, sizeof(*cmd), cmd); + 0, cmd_len, cmd); if (ret) IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); return ret; @@ -144,7 +148,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, { int ret, txq; unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout : - mvm->trans->trans_cfg->base_params->wd_timeout; + mvm->trans->mac_cfg->base->wd_timeout; if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA)) return -ENOSPC; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1e3639fa6b27..a4f412e750d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -126,7 +126,7 @@ struct iwl_mvm_time_event_data { /* Power management */ /** - * enum iwl_power_scheme + * enum iwl_power_scheme - iwl power schemes * @IWL_POWER_SCHEME_CAM: Continuously Active Mode * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default) * @IWL_POWER_SCHEME_LP: Low Power @@ -664,6 +664,8 @@ enum iwl_mvm_sched_scan_pass_all_states { * @min_backoff: The minimal tx backoff due to power restrictions * @params: Parameters to configure the thermal throttling algorithm. * @throttle: Is thermal throttling is active? 
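Editor's sketch for the STA_CONFIG_CMD change above: this is the usual iwlwifi pattern for growing a host command, look up the version the firmware advertises and send only the length that version defines, so older firmware never sees the trailing new fields. The sketch is meant to read in driver context rather than compile standalone; my_cmd/my_cmd_v1 are hypothetical layouts, and only the lookup and send helpers already used in the hunk appear.

struct my_cmd_v1 { __le32 sta_id; };			/* hypothetical v1 layout */
struct my_cmd    { __le32 sta_id; __le32 extra; };	/* hypothetical v2, extends v1 at the tail */

static int send_my_cmd(struct iwl_mvm *mvm, struct my_cmd *cmd)
{
	u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
	int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ?
		      sizeof(*cmd) : sizeof(struct my_cmd_v1);

	/* older firmware only ever receives the v1-sized prefix */
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_len, cmd);
}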
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and + * device */ struct iwl_mvm_tt_mgmt { struct delayed_work ct_kill_exit; @@ -672,6 +674,8 @@ struct iwl_mvm_tt_mgmt { u32 min_backoff; struct iwl_tt_params params; bool throttle; + + u32 power_budget_mw; }; #ifdef CONFIG_THERMAL @@ -999,7 +1003,7 @@ struct iwl_mvm { struct iwl_trans *trans; const struct iwl_fw *fw; - const struct iwl_cfg *cfg; + const struct iwl_rf_cfg *cfg; struct iwl_phy_db *phy_db; struct ieee80211_hw *hw; @@ -1033,6 +1037,8 @@ struct iwl_mvm { u8 cca_40mhz_workaround; + u8 fw_rates_ver; + u32 ampdu_ref; bool ampdu_toggle; @@ -1246,11 +1252,6 @@ struct iwl_mvm { struct iwl_time_quota_cmd last_quota_cmd; -#ifdef CONFIG_NL80211_TESTMODE - u32 noa_duration; - struct ieee80211_vif *noa_vif; -#endif - /* Tx queues */ u16 aux_queue; u16 snif_queue; @@ -1558,7 +1559,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM */ - if (mvm->cfg->nvm_type == IWL_NVM_EXT) + if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT) return nvm_lar && tlv_lar; else return tlv_lar; @@ -1622,13 +1623,13 @@ static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw) static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ - return mvm->trans->trans_cfg->gen2; + return mvm->trans->mac_cfg->gen2; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ - return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; + return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) @@ -1653,7 +1654,7 @@ static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) * but then there's a little bit of code in scan that won't make * any sense... 
*/ - return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; + return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) @@ -1728,13 +1729,13 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans) { - if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id)) + if (CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id)) return false; - switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) { case IWL_CFG_RF_TYPE_FM: /* Step A doesn't support eSR */ - return CSR_HW_RFID_STEP(trans->hw_rf_id); + return CSR_HW_RFID_STEP(trans->info.hw_rf_id); case IWL_CFG_RF_TYPE_WH: case IWL_CFG_RF_TYPE_PE: return true; @@ -1753,8 +1754,8 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, /* Check if HW supports eSR or STR */ if (iwl_mvm_is_esr_supported(trans) || - (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && - CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) + (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_FM && + CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id))) return IWL_FW_MAX_ACTIVE_LINKS_NUM; return 1; @@ -1767,7 +1768,7 @@ extern const u8 iwl_mvm_ac_to_bz_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) return iwl_mvm_ac_to_bz_tx_fifo[ac]; if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_ac_to_gen2_tx_fifo[ac]; @@ -1806,9 +1807,6 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); -void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, - enum nl80211_band band, - struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -1839,9 +1837,9 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, + struct iwl_tx_cmd_v6 *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); -void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); @@ -1872,7 +1870,7 @@ int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm, void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd) + struct iwl_tx_cmd_v6 *tx_cmd) { struct ieee80211_key_conf *keyconf = info->control.hw_key; @@ -2134,6 +2132,10 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, const struct iwl_mvm_link_sel_data *b); s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif); + + +extern const struct iwl_hcmd_arr iwl_mvm_groups[]; +extern const unsigned int iwl_mvm_groups_size; #endif /* AP and IBSS */ @@ -2455,7 +2457,7 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif 
*mvmvif, bool set, */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { - return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) & + return ((BIT(mvm->trans->mac_cfg->base->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 80ec59c58ae4..953218f1e025 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -120,7 +120,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, } else { IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n", - ret, mvm->trans->name); + ret, mvm->trans->info.name); ret = -ENODATA; } goto exit; @@ -191,7 +191,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, while (ret == length) { /* Check no memory assumptions fail and cause an overflow */ if ((size_read + offset + length) > - mvm->trans->trans_cfg->base_params->eeprom_size) { + mvm->trans->mac_cfg->base->eeprom_size) { IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); return -ENOBUFS; } @@ -206,7 +206,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } - iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset); + iwl_nvm_fixups(mvm->trans->info.hw_id, section, data, offset); IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); @@ -226,7 +226,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) /* Checking for required sections */ if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || - !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { + !mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); return NULL; } @@ -244,7 +244,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) return NULL; } /* MAC_OVERRIDE or at least HW section must exist */ - if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data && + if (!mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data && !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) { IWL_ERR(mvm, "Can't parse mac_address, empty sections\n"); @@ -260,7 +260,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } } - hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; + hw = (const __be16 *)sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; mac_override = @@ -308,16 +308,15 @@ int iwl_nvm_init(struct iwl_mvm *mvm) int ret, section; u32 size_read = 0; u8 *nvm_buffer, *temp; - const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step; - if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) + if (WARN_ON_ONCE(mvm->trans->mac_cfg->base->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) return -EINVAL; /* load NVM values from nic */ /* Read From FW NVM */ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); - nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size, + nvm_buffer = kmalloc(mvm->trans->mac_cfg->base->eeprom_size, GFP_KERNEL); if 
(!nvm_buffer) return -ENOMEM; @@ -338,7 +337,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) break; } - iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret); + iwl_nvm_fixups(mvm->trans->info.hw_id, section, temp, ret); mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; @@ -367,7 +366,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) mvm->nvm_reg_blob.size = ret; break; default: - if (section == mvm->cfg->nvm_hw_section_num) { + if (section == mvm->trans->mac_cfg->base->nvm_hw_section_num) { mvm->nvm_hw_blob.data = temp; mvm->nvm_hw_blob.size = ret; break; @@ -384,21 +383,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm) /* read External NVM file from the mod param */ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); - if (ret) { - mvm->nvm_file_name = nvm_file_C; - - if ((ret == -EFAULT || ret == -ENOENT) && - mvm->nvm_file_name) { - /* in case nvm file was failed try again */ - ret = iwl_read_external_nvm(mvm->trans, - mvm->nvm_file_name, - mvm->nvm_sections); - if (ret) - return ret; - } else { - return ret; - } - } + if (ret) + return ret; } /* parse the relevant nvm sections */ @@ -554,7 +540,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) struct ieee80211_regdomain *regd; char mcc[3]; - if (mvm->cfg->nvm_type == IWL_NVM_EXT) { + if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT) { tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); nvm_lar = mvm->nvm_data->lar_enabled; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 76603ef02704..a2dc5c3b0596 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -92,11 +92,11 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, radio_cfg_step, radio_cfg_dash); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return; /* SKU control */ - reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); + reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->info.hw_rev); /* radio configuration */ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; @@ -114,7 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) * unrelated errors. Need to further investigate this, but for now * we'll separate cases. */ - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) @@ -135,7 +135,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. 
*/ - if (!mvm->trans->cfg->apmg_not_supported) + if (!mvm->trans->mac_cfg->base->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); @@ -777,7 +777,8 @@ static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = { HCMD_NAME(PROFILE_NOTIF), }; -static const struct iwl_hcmd_arr iwl_mvm_groups[] = { +VISIBLE_IF_IWLWIFI_KUNIT +const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), @@ -793,6 +794,11 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names), [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), }; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups); +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +const unsigned int iwl_mvm_groups_size = ARRAY_SIZE(iwl_mvm_groups); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups_size); +#endif /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); @@ -1272,13 +1278,12 @@ static void iwl_mvm_trig_link_selection(struct wiphy *wiphy, } static struct iwl_op_mode * -iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, +iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; struct iwl_mvm *mvm; - struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { TX_CMD, }; @@ -1286,6 +1291,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, size_t scan_size; u32 min_backoff; struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; + int ratecheck; int err; /* @@ -1306,18 +1312,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (!hw) return ERR_PTR(-ENOMEM); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) max_agg = 512; else max_agg = IEEE80211_MAX_AMPDU_BUF_HE; hw->max_rx_aggregation_subframes = max_agg; - if (cfg->max_tx_agg_size) - hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; - else - hw->max_tx_aggregation_subframes = max_agg; - op_mode = hw->priv; mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1337,19 +1338,58 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->init_status = 0; + /* start with v1 rates */ + mvm->fw_rates_ver = 1; + + /* check for rates version 2 */ + ratecheck = + (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 8) + + (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) >= 3) + + (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + REPLY_RX_MPDU_CMD, 0) >= 4) + + (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 6); + if (ratecheck != 0 && ratecheck != 4) { + IWL_ERR(mvm, "Firmware has inconsistent rates\n"); + err = -EINVAL; + goto out_free; + } + if (ratecheck == 4) + mvm->fw_rates_ver = 2; + + /* check for rates version 3 */ + ratecheck = + (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 11) + + (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) >= 4) + + (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + REPLY_RX_MPDU_CMD, 0) >= 6) + + (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + RX_NO_DATA_NOTIF, 0) >= 4) + + (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 9); + if (ratecheck != 0 
&& ratecheck != 5) { + IWL_ERR(mvm, "Firmware has inconsistent rates\n"); + err = -EINVAL; + goto out_free; + } + if (ratecheck == 5) + mvm->fw_rates_ver = 3; + + trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; - trans->rx_mpdu_cmd_hdr_size = - (trans->trans_cfg->device_family >= + trans->conf.rx_mpdu_cmd_hdr_size = + (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ? sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; - trans->rx_mpdu_cmd_hdr_size = + trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); - if (WARN_ON(trans->num_rx_queues > 1)) { + if (WARN_ON(trans->info.num_rxqs > 1)) { err = -EINVAL; goto out_free; } @@ -1437,53 +1477,50 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, * Populate the state variables that the transport layer needs * to know about. */ - trans_cfg.op_mode = op_mode; - trans_cfg.no_reclaim_cmds = no_reclaim_cmds; - trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); + BUILD_BUG_ON(sizeof(no_reclaim_cmds) > + sizeof(trans->conf.no_reclaim_cmds)); + memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds, + sizeof(no_reclaim_cmds)); + trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size(); + trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size(); - trans->wide_cmd_header = true; - trans_cfg.bc_table_dword = - mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; + trans->conf.wide_cmd_header = true; - trans_cfg.command_groups = iwl_mvm_groups; - trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); + trans->conf.command_groups = iwl_mvm_groups; + trans->conf.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; - trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; - trans_cfg.scd_set_active = true; + trans->conf.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; + trans->conf.cmd_fifo = IWL_MVM_TX_FIFO_CMD; + trans->conf.scd_set_active = true; - trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, - driver_data[2]); + trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info, + driver_data[2]); snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%.31s", fw->fw_version); - trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); + trans->conf.fw_reset_handshake = + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); - trans_cfg.queue_alloc_cmd_ver = + trans->conf.queue_alloc_cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), 0); mvm->sta_remove_requires_queue_remove = - trans_cfg.queue_alloc_cmd_ver > 0; + trans->conf.queue_alloc_cmd_ver > 0; mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); /* Configure transport layer */ - iwl_trans_configure(mvm->trans, &trans_cfg); + iwl_trans_op_mode_enter(mvm->trans, op_mode); - trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; - trans->iml = mvm->fw->iml; - trans->iml_len = mvm->fw->iml_len; - /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); @@ -2100,7 +2137,7 @@ static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode, mvm->fwrt.trans->dbg.restart_required = false; ieee80211_restart_hw(mvm->hw); return true; - } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { + } else if 
(mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { ieee80211_restart_hw(mvm->hw); return true; } @@ -2125,7 +2162,6 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode) mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); - mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_mvm_stop_device(mvm); mvm->fast_resume = false; mutex_unlock(&mvm->mutex); @@ -2165,7 +2201,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - if (unlikely(queue >= mvm->trans->num_rx_queues)) + if (unlikely(queue >= mvm->trans->info.num_rxqs)) return; if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index a386b315e52f..0057fddf88f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -376,6 +376,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, if (!vif->cfg.ps || !mvmvif->pm_enabled) return; + if (iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2) + cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK); + if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) || diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index aad2614af9ad..798a7e4bea83 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2021-2022, 2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -86,45 +86,6 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, } } -static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, - struct iwl_time_quota_cmd *cmd) -{ -#ifdef CONFIG_NL80211_TESTMODE - struct iwl_mvm_vif *mvmvif; - int i, phy_id = -1, beacon_int = 0; - - if (!mvm->noa_duration || !mvm->noa_vif) - return; - - mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif); - if (!mvmvif->ap_ibss_active) - return; - - phy_id = mvmvif->deflink.phy_ctxt->id; - beacon_int = mvm->noa_vif->bss_conf.beacon_int; - - for (i = 0; i < MAX_BINDINGS; i++) { - struct iwl_time_quota_data *data = - iwl_mvm_quota_cmd_get_quota(mvm, cmd, - i); - u32 id_n_c = le32_to_cpu(data->id_and_color); - u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; - u32 quota = le32_to_cpu(data->quota); - - if (id != phy_id) - continue; - - quota *= (beacon_int - mvm->noa_duration); - quota /= beacon_int; - - IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", - le32_to_cpu(data->quota), quota); - - data->quota = cpu_to_le32(quota); - } -#endif -} - int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_update, struct ieee80211_vif *disabled_vif) @@ -260,8 +221,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, } } - iwl_mvm_adjust_quota_for_noa(mvm, &cmd); 
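The ratecheck logic in iwl_op_mode_mvm_start() above sums one boolean per firmware API (TX_CMD, TLC_MNG_UPDATE_NOTIF, REPLY_RX_MPDU_CMD, RX_NO_DATA_NOTIF) and accepts only the all-old or all-new outcome. A minimal sketch of that all-or-nothing pattern, with a hypothetical helper name and caller-supplied checks (not driver code):

#include <linux/types.h>

/*
 * Sketch: each entry is 1 when the corresponding firmware API already
 * speaks the newer rate format.  Only 0 (all old) or n_checks (all new)
 * is consistent; any mix corresponds to the "Firmware has inconsistent
 * rates" error path above and must be rejected.
 */
static bool example_rates_api_consistent(const u8 *checks, int n_checks,
					 bool *all_new)
{
	int hits = 0;

	for (int i = 0; i < n_checks; i++)
		hits += checks[i];

	*all_new = (hits == n_checks);
	return hits == 0 || hits == n_checks;
}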
- /* check that we have non-zero quota for all valid bindings */ for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index de5ac000272e..89ac4c6b3e54 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -72,7 +72,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, u16 flags = 0; /* get STBC flags */ - if (mvm->cfg->ht_params->stbc && + if (mvm->cfg->ht_params.stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) @@ -83,7 +83,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } - if (mvm->cfg->ht_params->ldpc && + if (mvm->cfg->ht_params.ldpc && ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; @@ -454,22 +454,11 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; - if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, - TLC_MNG_UPDATE_NOTIF, 0) < 3) { - rs_pretty_print_rate_v1(pretty_rate, - sizeof(pretty_rate), - le32_to_cpu(notif->rate)); - IWL_DEBUG_RATE(mvm, - "Got rate in old format. Rate: %s. Converting.\n", - pretty_rate); - lq_sta->last_rate_n_flags = - iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); - } else { - lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); - } + lq_sta->last_rate_n_flags = + iwl_mvm_v3_rate_from_fw(notif->rate, mvm->fw_rates_ver); rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); - IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); + IWL_DEBUG_RATE(mvm, "rate: %s\n", pretty_rate); } if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 068c58e9c1eb..5802ed80a9ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2,6 +2,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014, 2018 - 2023 Intel Corporation. All rights reserved. 
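The no_reclaim_cmds hunk above copies a small local array into a fixed-size field of trans->conf and protects the copy with BUILD_BUG_ON(). A minimal sketch of that compile-time guard, using a hypothetical structure and an illustrative command value:

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_conf {
	u8 no_reclaim_cmds[4];		/* hypothetical fixed-size destination */
	u8 n_no_reclaim_cmds;
};

static void example_set_no_reclaim(struct example_conf *conf)
{
	static const u8 cmds[] = { 0x1c };	/* illustrative command id */

	/* refuses to build if the source array ever outgrows the field */
	BUILD_BUG_ON(sizeof(cmds) > sizeof(conf->no_reclaim_cmds));
	memcpy(conf->no_reclaim_cmds, cmds, sizeof(cmds));
	conf->n_no_reclaim_cmds = ARRAY_SIZE(cmds);
}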
+ * Copyright(c) 2025 Intel Corporation * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH *****************************************************************************/ @@ -895,7 +896,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, WARN_ON_ONCE(1); } } else if (ucode_rate & RATE_MCS_VHT_MSK_V1) { - nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1; + nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1; if (nss == 1) { rate->type = LQ_VHT_SISO; @@ -909,7 +910,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, WARN_ON_ONCE(1); } } else if (ucode_rate & RATE_MCS_HE_MSK_V1) { - nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1; + nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1; if (nss == 1) { rate->type = LQ_HE_SISO; @@ -2696,8 +2697,10 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, lq_sta = mvm_sta; spin_lock_bh(&lq_sta->pers.lock); - iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, - info->band, &info->control.rates[0]); + iwl_mvm_hwrate_to_tx_rate(iwl_mvm_v3_rate_from_fw( + cpu_to_le32(lq_sta->last_rate_n_flags), + 1), + info->band, &info->control.rates[0]); info->control.rates[0].count = 1; /* Report the optimal rate based on rssi and STA caps if we haven't @@ -2707,8 +2710,12 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, optimal_rate = rs_get_optimal_rate(mvm, lq_sta); last_ucode_rate = ucode_rate_from_rs_rate(mvm, optimal_rate); - iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, - &txrc->reported_rate); + last_ucode_rate = + iwl_mvm_v3_rate_from_fw(cpu_to_le32(last_ucode_rate), + 1); + iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, + &txrc->reported_rate); + txrc->reported_rate.count = 1; } spin_unlock_bh(&lq_sta->pers.lock); } @@ -2813,11 +2820,11 @@ static void rs_ht_init(struct iwl_mvm *mvm, lq_sta->active_mimo2_rate &= ~((u16)0x2); lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; - if (mvm->cfg->ht_params->ldpc && + if (mvm->cfg->ht_params.ldpc && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) lq_sta->ldpc = true; - if (mvm->cfg->ht_params->stbc && + if (mvm->cfg->ht_params.stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) lq_sta->stbc_capable = true; @@ -2832,11 +2839,11 @@ static void rs_vht_init(struct iwl_mvm *mvm, { rs_vht_set_enabled_rates(sta, vht_cap, lq_sta); - if (mvm->cfg->ht_params->ldpc && + if (mvm->cfg->ht_params.ldpc && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)) lq_sta->ldpc = true; - if (mvm->cfg->ht_params->stbc && + if (mvm->cfg->ht_params.stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) lq_sta->stbc_capable = true; @@ -2887,10 +2894,10 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) if (rate & RATE_MCS_HT_MSK_V1) { mvm->drv_rx_stats.ht_frames++; - nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1; + nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1; } else if (rate & RATE_MCS_VHT_MSK_V1) { mvm->drv_rx_stats.vht_frames++; - nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1; + nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1; } else { mvm->drv_rx_stats.legacy_frames++; } @@ -3304,7 +3311,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, if (num_of_ant(ant) == 1) lq_cmd->single_stream_ant_msk = ant; - if (!mvm->trans->trans_cfg->gen2) + if (!mvm->trans->mac_cfg->gen2) lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else 
lq_cmd->agg_frame_cnt_limit = @@ -3673,16 +3680,15 @@ int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate) if (rate & RATE_MCS_VHT_MSK_V1) { type = "VHT"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; - nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1; + nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1; } else if (rate & RATE_MCS_HT_MSK_V1) { type = "HT"; mcs = rate & RATE_HT_MCS_INDEX_MSK_V1; - nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) - >> RATE_HT_MCS_NSS_POS_V1) + 1; + nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1; } else if (rate & RATE_MCS_HE_MSK_V1) { type = "HE"; mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; - nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1; + nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1; } else { type = "Unknown"; /* shouldn't happen */ } @@ -4172,3 +4178,167 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, else return rs_drv_tx_protection(mvm, mvmsta, enable); } + +static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) +{ + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; + int idx; + bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); + int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; + int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; + + for (idx = offset; idx < last; idx++) + if (iwl_fw_rate_idx_to_plcp(idx) == rate) + return idx - offset; + return IWL_RATE_INVALID; +} + +u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver) +{ + u32 rate_v3 = 0, rate_v1; + u32 dup = 0; + + if (rate_ver > 1) + return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3); + + rate_v1 = le32_to_cpu(rate); + if (rate_v1 == 0) + return rate_v1; + /* convert rate */ + if (rate_v1 & RATE_MCS_HT_MSK_V1) { + u32 nss; + + rate_v3 |= RATE_MCS_MOD_TYPE_HT; + rate_v3 |= + rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; + nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK); + rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || + rate_v1 & RATE_MCS_HE_MSK_V1) { + u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK); + + rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; + + rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + + if (rate_v1 & RATE_MCS_HE_MSK_V1) { + u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; + u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> + RATE_MCS_HE_106T_POS_V1; + u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> + RATE_MCS_HE_GI_LTF_POS; + + if ((he_type_bits == RATE_MCS_HE_TYPE_SU || + he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && + he_gi_ltf == RATE_MCS_HE_SU_4_LTF) + /* the new rate have an additional bit to + * represent the value 4 rather then using SGI + * bit for this purpose - as it was done in the + * old rate + */ + he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> + RATE_MCS_SGI_POS_V1; + + rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; + rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS; + rate_v3 |= he_106t << RATE_MCS_HE_106T_POS; + rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; + rate_v3 |= RATE_MCS_MOD_TYPE_HE; + } else { + rate_v3 |= RATE_MCS_MOD_TYPE_VHT; + } + /* if legacy format */ + } else { + u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); + + if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) + legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? 
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; + + rate_v3 |= legacy_rate; + if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) + rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM; + } + + /* convert flags */ + if (rate_v1 & RATE_MCS_LDPC_MSK_V1) + rate_v3 |= RATE_MCS_LDPC_MSK; + rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | + (rate_v1 & RATE_MCS_ANT_AB_MSK) | + (rate_v1 & RATE_MCS_STBC_MSK) | + (rate_v1 & RATE_MCS_BF_MSK); + + dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; + if (dup) { + rate_v3 |= RATE_MCS_DUP_MSK; + rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS; + } + + if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && + (rate_v1 & RATE_MCS_SGI_MSK_V1)) + rate_v3 |= RATE_MCS_SGI_MSK; + + return rate_v3; +} + +__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver) +{ + u32 result = 0; + int rate_idx; + + if (rate_ver > 1) + return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2); + + switch (rate & RATE_MCS_MOD_TYPE_MSK) { + case RATE_MCS_MOD_TYPE_CCK: + result = RATE_MCS_CCK_MSK_V1; + fallthrough; + case RATE_MCS_MOD_TYPE_LEGACY_OFDM: + rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK); + if (!(result & RATE_MCS_CCK_MSK_V1)) + rate_idx += IWL_FIRST_OFDM_RATE; + result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx), + RATE_LEGACY_RATE_MSK_V1); + break; + case RATE_MCS_MOD_TYPE_HT: + result = RATE_MCS_HT_MSK_V1; + result |= u32_encode_bits(u32_get_bits(rate, + RATE_HT_MCS_CODE_MSK), + RATE_HT_MCS_RATE_CODE_MSK_V1); + result |= u32_encode_bits(u32_get_bits(rate, + RATE_MCS_NSS_MSK), + RATE_HT_MCS_MIMO2_MSK); + break; + case RATE_MCS_MOD_TYPE_VHT: + result = RATE_MCS_VHT_MSK_V1; + result |= u32_encode_bits(u32_get_bits(rate, + RATE_VHT_MCS_NSS_MSK), + RATE_MCS_CODE_MSK); + result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK), + RATE_VHT_MCS_NSS_MSK); + break; + case RATE_MCS_MOD_TYPE_HE: /* not generated */ + default: + WARN_ONCE(1, "bad modulation type %d\n", + u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK)); + return 0; + } + + if (rate & RATE_MCS_LDPC_MSK) + result |= RATE_MCS_LDPC_MSK_V1; + WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) > + RATE_MCS_CHAN_WIDTH_160_VAL); + result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) | + (rate & RATE_MCS_ANT_AB_MSK) | + (rate & RATE_MCS_STBC_MSK) | + (rate & RATE_MCS_BF_MSK); + + /* not handling DUP since we don't use it */ + WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK); + + if (rate & RATE_MCS_SGI_MSK) + result |= RATE_MCS_SGI_MSK_V1; + + return cpu_to_le32(result); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index ea81cb236d5c..69259ebb966b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,7 +3,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation + * Copyright (C) 2003 - 2014, 2018 - 2025 Intel Corporation *****************************************************************************/ #ifndef __rs_h__ @@ -424,6 +424,9 @@ void iwl_mvm_rate_control_unregister(void); struct iwl_mvm_sta; +u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver); +__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver); + int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 2dbef7b46355..8eb0aa448c85 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1,6 +1,6 
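The pair of helpers above (iwl_mvm_v3_rate_from_fw() and iwl_mvm_v3_rate_to_fw(), declared in rs.h) lets the rest of mvm work on the v3 rate layout regardless of which firmware rate version (mvm->fw_rates_ver) is in use. A minimal usage sketch, assuming the driver's rate definitions are in scope and using hypothetical wrapper names:

/* decode a firmware rate word and report the number of spatial streams,
 * mirroring the RX path further below */
static u32 example_decode_nss(__le32 fw_rate, u8 fw_rates_ver)
{
	u32 rate_v3 = iwl_mvm_v3_rate_from_fw(fw_rate, fw_rates_ver);

	return u32_get_bits(rate_v3, RATE_MCS_NSS_MSK) + 1;
}

/* the reverse direction: hand a v3 rate back to the firmware, which gets
 * a v1 word automatically when fw_rates_ver == 1 */
static __le32 example_reencode(u32 rate_v3, u8 fw_rates_ver)
{
	return iwl_mvm_v3_rate_to_fw(rate_v3, fw_rates_ver);
}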
@@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -490,8 +490,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) && rate_n_flags & RATE_MCS_SGI_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { @@ -1001,7 +999,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) sec_link = mvmvif->link[sec_link]->fw_link_id; /* Sum up RX and TX MPDUs from the different queues/links */ - for (int q = 0; q < mvm->trans->num_rx_queues; q++) { + for (int q = 0; q < mvm->trans->info.num_rxqs; q++) { spin_lock_bh(&mvmsta->mpdu_counters[q].lock); /* The link IDs that doesn't exist will contain 0 */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 358643ae0e42..077aadbf95db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -171,7 +171,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, shdr->type != htons(ETH_P_PAE) && shdr->type != htons(ETH_P_TDLS)))) skb->ip_summed = CHECKSUM_NONE; - else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + else if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ) /* mac80211 assumes full CSUM including SNAP header */ skb_postpush_rcsum(skb, shdr, sizeof(*shdr)); } @@ -409,7 +409,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; - if (mvm->trans->trans_cfg->gen2 && + if (mvm->trans->mac_cfg->gen2 && !(status & RX_MPDU_RES_STATUS_MIC_OK)) stats->flag |= RX_FLAG_MMIC_ERROR; @@ -426,7 +426,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (pkt_flags & FH_RSCSR_RADA_EN) { stats->flag |= RX_FLAG_ICV_STRIPPED; - if (mvm->trans->trans_cfg->gen2) + if (mvm->trans->mac_cfg->gen2) stats->flag |= RX_FLAG_MMIC_STRIPPED; } @@ -462,7 +462,7 @@ static void iwl_mvm_rx_csum(struct iwl_mvm *mvm, { struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) { u16 hwsum = be16_to_cpu(desc->v3.raw_xsum); @@ -744,7 +744,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; - if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) + if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000) return false; /* @@ -1944,7 +1944,7 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, } /* must be before L-SIG data */ - if (format == RATE_MCS_HE_MSK) + if (format == RATE_MCS_MOD_TYPE_HE) iwl_mvm_rx_he(mvm, skb, phy_data, queue); iwl_mvm_decode_lsig(skb, phy_data); @@ -1966,45 +1966,45 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, phy_data->energy_a, phy_data->energy_b); /* using TLV format and must be after all fixed len fields */ - if (format == RATE_MCS_EHT_MSK) + if (format == 
RATE_MCS_MOD_TYPE_EHT) iwl_mvm_rx_eht(mvm, skb, phy_data, queue); if (unlikely(mvm->monitor_on)) iwl_mvm_add_rtap_sniffer_config(mvm, skb); - is_sgi = format == RATE_MCS_HE_MSK ? + is_sgi = format == RATE_MCS_MOD_TYPE_HE ? iwl_he_is_sgi(rate_n_flags) : rate_n_flags & RATE_MCS_SGI_MSK; - if (!(format == RATE_MCS_CCK_MSK) && is_sgi) + if (!(format == RATE_MCS_MOD_TYPE_CCK) && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; switch (format) { - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: rx_status->encoding = RX_ENC_VHT; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: rx_status->encoding = RX_ENC_HE; rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); break; - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_EHT: rx_status->encoding = RX_ENC_EHT; break; } switch (format) { - case RATE_MCS_HT_MSK: + case RATE_MCS_MOD_TYPE_HT: rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; break; - case RATE_MCS_VHT_MSK: - case RATE_MCS_HE_MSK: - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: + case RATE_MCS_MOD_TYPE_HE: + case RATE_MCS_MOD_TYPE_EHT: rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; @@ -2048,7 +2048,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) desc_size = sizeof(*desc); else desc_size = IWL_RX_DESC_SIZE_V1; @@ -2058,8 +2058,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, return; } - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + phy_data.rate_n_flags = + iwl_mvm_v3_rate_from_fw(desc->v3.rate_n_flags, + mvm->fw_rates_ver); phy_data.channel = desc->v3.channel; phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); phy_data.energy_a = desc->v3.energy_a; @@ -2072,7 +2074,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.eht_d4 = desc->phy_eht_data4; phy_data.d5 = desc->v3.phy_data5; } else { - phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); + phy_data.rate_n_flags = + iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags, + mvm->fw_rates_ver); phy_data.channel = desc->v1.channel; phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); phy_data.energy_a = desc->v1.energy_a; @@ -2084,13 +2088,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.d3 = desc->v1.phy_data3; } - if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - REPLY_RX_MPDU_CMD, 0) < 4) { - phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); - IWL_DEBUG_DROP(mvm, "Got old format rate, converting. 
New rate: 0x%x\n", - phy_data.rate_n_flags); - } - format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; len = le16_to_cpu(desc->mpdu_len); @@ -2138,14 +2135,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } /* set the preamble flag if appropriate */ - if (format == RATE_MCS_CCK_MSK && + if (format == RATE_MCS_MOD_TYPE_CCK && phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; - if (mvm->trans->trans_cfg->device_family >= + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); else @@ -2304,7 +2301,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - if (mvm->trans->trans_cfg->device_family == + if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000) { iwl_mvm_flip_address(hdr->addr3); @@ -2350,7 +2347,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) && likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) && likely(!iwl_mvm_mei_filter_scan(mvm, skb))) { - if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && + if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 && (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) rx_status->flag |= RX_FLAG_AMSDU_MORE; @@ -2384,7 +2381,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.d1 = desc->phy_info[1]; phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); - phy_data.rate_n_flags = le32_to_cpu(desc->rate); phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK); phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK); phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK); @@ -2392,14 +2388,8 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.rx_vec[0] = desc->rx_vec[0]; phy_data.rx_vec[1] = desc->rx_vec[1]; - if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, - RX_NO_DATA_NOTIF, 0) < 2) { - IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n", - phy_data.rate_n_flags); - phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); - IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n", - phy_data.rate_n_flags); - } + phy_data.rate_n_flags = iwl_mvm_v3_rate_from_fw(desc->rate, + mvm->fw_rates_ver); format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; @@ -2412,7 +2402,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.rx_vec[2] = desc->rx_vec[2]; phy_data.rx_vec[3] = desc->rx_vec[3]; } else { - if (format == RATE_MCS_EHT_MSK) + if (format == RATE_MCS_MOD_TYPE_EHT) /* no support for EHT before version 3 API */ return; } @@ -2473,17 +2463,17 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, * may be up to 8 spatial streams. 
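The RX status hunks above no longer test per-format flag bits; they mask out the modulation-type field of the v3 rate word and compare it against the RATE_MCS_MOD_TYPE_* values. A small sketch of that dispatch, assuming the rate definitions used in this patch (hypothetical helper name):

static const char *example_mod_type_str(u32 rate_n_flags)
{
	switch (rate_n_flags & RATE_MCS_MOD_TYPE_MSK) {
	case RATE_MCS_MOD_TYPE_CCK:
		return "CCK";
	case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
		return "legacy OFDM";
	case RATE_MCS_MOD_TYPE_HT:
		return "HT";
	case RATE_MCS_MOD_TYPE_VHT:
		return "VHT";
	case RATE_MCS_MOD_TYPE_HE:
		return "HE";
	case RATE_MCS_MOD_TYPE_EHT:
		return "EHT";
	default:
		return "unknown";
	}
}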
*/ switch (format) { - case RATE_MCS_VHT_MSK: + case RATE_MCS_MOD_TYPE_VHT: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; break; - case RATE_MCS_HE_MSK: + case RATE_MCS_MOD_TYPE_HE: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; break; - case RATE_MCS_EHT_MSK: + case RATE_MCS_MOD_TYPE_EHT: rx_status->nss = le32_get_bits(desc->rx_vec[2], RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 78fd7faaed97..8ec4a007b4b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2015, 2018-2024 Intel Corporation + * Copyright (C) 2012-2015, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -791,10 +791,10 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, lockdep_assert_held(&mvm->mutex); - if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, + if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues, "max queue %d >= num_of_queues (%d)", maxq, - mvm->trans->trans_cfg->base_params->num_of_queues)) - maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; + mvm->trans->mac_cfg->base->num_of_queues)) + maxq = mvm->trans->mac_cfg->base->num_of_queues - 1; /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) @@ -852,7 +852,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, - mvm->trans->cfg->min_txq_size); + mvm->trans->mac_cfg->base->min_txq_size); } else { size = iwl_mvm_get_queue_size(sta); } @@ -1765,7 +1765,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm_sta->deflink.sta_id = sta_id; rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink); - if (!mvm->trans->trans_cfg->gen2) + if (!mvm->trans->mac_cfg->gen2) mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else @@ -1798,7 +1798,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (iwl_mvm_has_new_rx_api(mvm)) { int q; - dup_data = kcalloc(mvm->trans->num_rx_queues, + dup_data = kcalloc(mvm->trans->info.num_rxqs, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; @@ -1811,7 +1811,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. 
*/ - for (q = 0; q < mvm->trans->num_rx_queues; q++) + for (q = 0; q < mvm->trans->info.num_rxqs; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mvm_sta->dup_data = dup_data; @@ -1839,11 +1839,11 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && !sta->tdls && ieee80211_vif_is_mld(vif)) { mvm_sta->mpdu_counters = - kcalloc(mvm->trans->num_rx_queues, + kcalloc(mvm->trans->info.num_rxqs, sizeof(*mvm_sta->mpdu_counters), GFP_KERNEL); if (mvm_sta->mpdu_counters) - for (int q = 0; q < mvm->trans->num_rx_queues; q++) + for (int q = 0; q < mvm->trans->info.num_rxqs; q++) spin_lock_init(&mvm_sta->mpdu_counters[q].lock); } @@ -2189,7 +2189,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = - mvm->trans->trans_cfg->base_params->wd_timeout; + mvm->trans->mac_cfg->base->wd_timeout; struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, @@ -2206,7 +2206,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) { unsigned int wdg_timeout = - mvm->trans->trans_cfg->base_params->wd_timeout; + mvm->trans->mac_cfg->base->wd_timeout; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); @@ -2717,7 +2717,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, iwl_mvm_sync_rxq_del_ba(mvm, data->baid); - for (i = 0; i < mvm->trans->num_rx_queues; i++) { + for (i = 0; i < mvm->trans->info.num_rxqs; i++) { int j; struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; @@ -2750,7 +2750,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, { int i; - for (i = 0; i < mvm->trans->num_rx_queues; i++) { + for (i = 0; i < mvm->trans->info.num_rxqs; i++) { struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = @@ -2925,7 +2925,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + - mvm->trans->num_rx_queues * + mvm->trans->info.num_rxqs * reorder_buf_size, GFP_KERNEL); if (!baid_data) @@ -3177,7 +3177,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; - if (mvm->trans->trans_cfg->gen2) + if (mvm->trans->mac_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { @@ -4305,7 +4305,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
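Several hunks above and below mask the software SSN to 8 bits before comparing it with next_reclaimed, because on gen2 devices the hardware index is only 8 bits wide. A minimal sketch of that comparison (hypothetical helper, numbers purely illustrative):

#include <linux/types.h>

static bool example_ssn_matches(u16 ssn, u16 next_reclaimed, bool gen2)
{
	u16 normalized_ssn = ssn;

	if (gen2)
		normalized_ssn &= 0xff;	/* e.g. 0x105 -> 0x05 */

	return normalized_ssn == next_reclaimed;
}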
*/ - if (mvm->trans->trans_cfg->gen2) + if (mvm->trans->mac_cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index af62c7f7c834..6b183f5e9bbc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -259,7 +259,7 @@ struct iwl_mvm_tid_data { u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ - u32 rate_n_flags; + __le32 rate_n_flags; u8 lq_color; bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile index 6bd56a28cffd..895d53f223e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile @@ -1,3 +1,3 @@ -iwlmvm-tests-y += module.o links.o scan.o +iwlmvm-tests-y += module.o links.o scan.o hcmd.o obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c new file mode 100644 index 000000000000..1fee0320c756 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2025 Intel Corporation + */ +#include <kunit/test.h> + +#include <iwl-trans.h> +#include "../mvm.h" + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static void test_hcmd_names_sorted(struct kunit *test) +{ + for (int i = 0; i < iwl_mvm_groups_size; i++) { + const struct iwl_hcmd_arr *arr = &iwl_mvm_groups[i]; + + if (!arr->arr) + continue; + + for (int j = 0; j < arr->size - 1; j++) + KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id, + arr->arr[j + 1].cmd_id); + } +} + +static struct kunit_case hcmd_names_cases[] = { + KUNIT_CASE(test_hcmd_names_sorted), + {}, +}; + +static struct kunit_suite hcmd_names = { + .name = "iwlmvm-hcmd-names", + .test_cases = hcmd_names_cases, +}; + +kunit_test_suite(hcmd_names); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 1a30bb1ff8ca..478408f802d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -771,15 +771,17 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) { - struct iwl_roc_req_v5 roc_cmd = { + struct iwl_roc_req roc_cmd = { .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .activity = cpu_to_le32(activity), }; + u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_cmd); int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0, - sizeof(roc_cmd), &roc_cmd); + cmd_len, &roc_cmd); if (ret) IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret); } @@ -1102,11 +1104,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, { int res; u32 duration_tu, delay; - struct iwl_roc_req_v5 roc_req = { + struct iwl_roc_req roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .activity = cpu_to_le32(activity), .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), }; + u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + u16 cmd_len = ver < 6 ? 
sizeof(struct iwl_roc_req_v5) : sizeof(roc_req); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); @@ -1136,7 +1140,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), - 0, sizeof(roc_req), &roc_req); + 0, cmd_len, &roc_req); if (!res) mvmvif->roc_activity = activity; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index c851290e75a2..53bab21ebae2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -8,6 +8,9 @@ #include "mvm.h" +#define IWL_MVM_NUM_CTDP_STEPS 20 +#define IWL_MVM_MIN_CTDP_BUDGET_MW 150 + #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) @@ -479,43 +482,28 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = { .support_tx_backoff = true, }; -/* budget in mWatt */ -static const u32 iwl_mvm_cdev_budgets[] = { - 2400, /* cooling state 0 */ - 2000, /* cooling state 1 */ - 1800, /* cooling state 2 */ - 1600, /* cooling state 3 */ - 1400, /* cooling state 4 */ - 1200, /* cooling state 5 */ - 1000, /* cooling state 6 */ - 900, /* cooling state 7 */ - 800, /* cooling state 8 */ - 700, /* cooling state 9 */ - 650, /* cooling state 10 */ - 600, /* cooling state 11 */ - 550, /* cooling state 12 */ - 500, /* cooling state 13 */ - 450, /* cooling state 14 */ - 400, /* cooling state 15 */ - 350, /* cooling state 16 */ - 300, /* cooling state 17 */ - 250, /* cooling state 18 */ - 200, /* cooling state 19 */ - 150, /* cooling state 20 */ -}; - int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) { struct iwl_ctdp_cmd cmd = { .operation = cpu_to_le32(op), - .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), .window_size = 0, }; + u32 budget; int ret; u32 status; lockdep_assert_held(&mvm->mutex); + /* Do a linear scale from IWL_MVM_MIN_CTDP_BUDGET_MW to the configured + * maximum in the predefined number of steps. 
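The ROC hunks above size the host command by the firmware's advertised ROC_CMD version, so a firmware that only understands struct iwl_roc_req_v5 receives just that prefix of the larger struct iwl_roc_req. A hedged sketch of the pattern (the version cut-off of 6 comes from the hunks above; the helper name is illustrative):

static int example_send_roc(struct iwl_mvm *mvm, struct iwl_roc_req *req)
{
	u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw,
				       WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
	u16 len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(*req);

	/* older firmware never sees the trailing v6 fields */
	return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
				    0, len, req);
}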
+ */ + budget = ((mvm->thermal_throttle.power_budget_mw - + IWL_MVM_MIN_CTDP_BUDGET_MW) * + (IWL_MVM_NUM_CTDP_STEPS - 1 - state)) / + (IWL_MVM_NUM_CTDP_STEPS - 1) + + IWL_MVM_MIN_CTDP_BUDGET_MW; + cmd.budget = cpu_to_le32(budget); + status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), @@ -552,8 +540,8 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) #ifdef CONFIG_THERMAL static int compare_temps(const void *a, const void *b) { - return ((s16)le16_to_cpu(*(__le16 *)a) - - (s16)le16_to_cpu(*(__le16 *)b)); + return ((s16)le16_to_cpu(*(const __le16 *)a) - + (s16)le16_to_cpu(*(const __le16 *)b)); } struct iwl_trip_walk_data { @@ -707,7 +695,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { - *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1; + *state = IWL_MVM_NUM_CTDP_STEPS - 1; return 0; } @@ -733,7 +721,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; - if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) + if (new_state >= IWL_MVM_NUM_CTDP_STEPS) return -EINVAL; return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, @@ -794,6 +782,47 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) } #endif /* CONFIG_THERMAL */ +static u32 iwl_mvm_ctdp_get_max_budget(struct iwl_mvm *mvm) +{ + u64 bios_power_budget = 0; + u32 default_power_budget; + + switch (CSR_HW_RFID_TYPE(mvm->trans->info.hw_rf_id)) { + case IWL_CFG_RF_TYPE_JF2: + case IWL_CFG_RF_TYPE_JF1: + default_power_budget = 2000; + break; + case IWL_CFG_RF_TYPE_HR2: + case IWL_CFG_RF_TYPE_HR1: + default_power_budget = 2400; + break; + case IWL_CFG_RF_TYPE_GF: + /* dual-radio devices have a higher budget */ + if (CSR_HW_RFID_IS_CDB(mvm->trans->info.hw_rf_id)) + default_power_budget = 5200; + else + default_power_budget = 2880; + break; + case IWL_CFG_RF_TYPE_FM: + default_power_budget = 3450; + break; + default: + default_power_budget = 5550; + break; + } + + iwl_bios_get_pwr_limit(&mvm->fwrt, &bios_power_budget); + + /* 32bit in UEFI, 16bit in ACPI; use BIOS value if it is in range */ + if (bios_power_budget && + bios_power_budget != 0xffff && bios_power_budget != 0xffffffff && + bios_power_budget >= IWL_MVM_MIN_CTDP_BUDGET_MW && + bios_power_budget <= default_power_budget) + return (u32)bios_power_budget; + + return default_power_budget; +} + void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; @@ -805,6 +834,8 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) else tt->params = iwl_mvm_default_tt_params; + tt->power_budget_mw = iwl_mvm_ctdp_get_max_budget(mvm); + IWL_DEBUG_TEMP(mvm, "cTDP power budget: %d mW\n", tt->power_budget_mw); tt->throttle = false; tt->dynamic_smps = false; tt->min_backoff = min_backoff; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 91f6945f3f98..ac2cf1b8ce23 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -148,7 +148,7 @@ out: * Sets most of the Tx cmd's fields */ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, - struct iwl_tx_cmd *tx_cmd, + struct iwl_tx_cmd_v6 *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; @@ -283,14 +283,10 @@ static u32 
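The cTDP change above replaces the fixed budget table with a linear interpolation between the per-RF maximum and IWL_MVM_MIN_CTDP_BUDGET_MW across IWL_MVM_NUM_CTDP_STEPS cooling states. A worked sketch of the same formula, assuming a 2400 mW maximum (the HR default listed above):

#include <linux/types.h>

#define EX_MIN_BUDGET_MW	150	/* IWL_MVM_MIN_CTDP_BUDGET_MW */
#define EX_NUM_STEPS		20	/* IWL_MVM_NUM_CTDP_STEPS */

/* same linear scale as iwl_mvm_ctdp_command() above */
static u32 example_ctdp_budget(u32 max_budget_mw, u32 state)
{
	return (max_budget_mw - EX_MIN_BUDGET_MW) *
	       (EX_NUM_STEPS - 1 - state) / (EX_NUM_STEPS - 1) +
	       EX_MIN_BUDGET_MW;
}

/*
 * With max_budget_mw == 2400:
 *   state 0  -> 2400 mW (no throttling)
 *   state 10 -> 1215 mW
 *   state 19 ->  150 mW (deepest cooling state)
 */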
iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm, (rate_idx <= IWL_LAST_CCK_RATE); /* Set CCK or OFDM flag */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { - if (!is_cck) - rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; - else - rate_flags |= RATE_MCS_CCK_MSK; - } else if (is_cck) { - rate_flags |= RATE_MCS_CCK_MSK_V1; - } + if (!is_cck) + rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM; + else + rate_flags |= RATE_MCS_MOD_TYPE_CCK; return (u32)rate_plcp | rate_flags; } @@ -303,45 +299,35 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_rate *rate = &info->control.rates[0]; u32 result; - /* - * we only care about legacy/HT/VHT so far, so we can - * build in v1 and use iwl_new_rate_from_v1() - */ - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { u8 mcs = ieee80211_rate_get_vht_mcs(rate); u8 nss = ieee80211_rate_get_vht_nss(rate); - result = RATE_MCS_VHT_MSK_V1; + result = RATE_MCS_MOD_TYPE_VHT; result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK); result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - result |= RATE_MCS_SGI_MSK_V1; + result |= RATE_MCS_SGI_MSK; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_40; else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_80; else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) - result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); - - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) - result = iwl_new_rate_from_v1(result); + result |= RATE_MCS_CHAN_WIDTH_160; } else if (rate->flags & IEEE80211_TX_RC_MCS) { - result = RATE_MCS_HT_MSK_V1; - result |= u32_encode_bits(rate->idx, - RATE_HT_MCS_RATE_CODE_MSK_V1 | - RATE_HT_MCS_NSS_MSK_V1); + result = RATE_MCS_MOD_TYPE_HT; + result |= u32_encode_bits(rate->idx & 0x7, + RATE_HT_MCS_CODE_MSK); + result |= u32_encode_bits(rate->idx >> 3, + RATE_MCS_NSS_MSK); if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - result |= RATE_MCS_SGI_MSK_V1; + result |= RATE_MCS_SGI_MSK; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + result |= RATE_MCS_CHAN_WIDTH_40; if (info->flags & IEEE80211_TX_CTL_LDPC) - result |= RATE_MCS_LDPC_MSK_V1; + result |= RATE_MCS_LDPC_MSK; if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) result |= RATE_MCS_STBC_MSK; - - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) - result = iwl_new_rate_from_v1(result); } else { int rate_idx = info->control.rates[0].idx; @@ -391,21 +377,25 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, return iwl_mvm_convert_rate_idx(mvm, info, rate_idx); } -static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, __le16 fc) +static __le32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) { + u32 rate; + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) - return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc); + rate = iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc); + else + rate = iwl_mvm_get_tx_rate(mvm, info, sta, fc) | + iwl_mvm_get_tx_ant(mvm, info, sta, fc); - return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | - iwl_mvm_get_tx_ant(mvm, info, sta, fc); + return iwl_mvm_v3_rate_to_fw(rate, mvm->fw_rates_ver); } /* * Sets the fields in the Tx cmd that are rate 
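For injected HT frames, the hunk above splits mac80211's flat HT MCS index into the v3 rate's MCS-code and NSS fields (idx & 0x7 and idx >> 3). A small worked sketch with a hypothetical helper name, assuming the driver's rate masks are in scope:

#include <linux/bitfield.h>

/*
 * mac80211 HT MCS 0..31 is effectively "8 * (nss - 1) + code", so e.g.
 * MCS 12 becomes code 4 on the second spatial stream (NSS field 1).
 */
static u32 example_ht_rate_from_idx(u8 mcs_idx)
{
	u32 rate = RATE_MCS_MOD_TYPE_HT;

	rate |= u32_encode_bits(mcs_idx & 0x7, RATE_HT_MCS_CODE_MSK);
	rate |= u32_encode_bits(mcs_idx >> 3, RATE_MCS_NSS_MSK);

	return rate;
}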
related */ -void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, +void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { @@ -443,8 +433,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, } /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = - cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); + tx_cmd->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, @@ -469,7 +458,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, */ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, + struct iwl_tx_cmd_v6 *tx_cmd, struct sk_buff *skb_frag, int hdrlen) { @@ -543,7 +532,7 @@ static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm, * (since we don't necesarily know the link), but FW rate * selection was fixed. */ - return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ; + return mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ; } static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen, @@ -567,7 +556,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd *dev_cmd; - struct iwl_tx_cmd *tx_cmd; + struct iwl_tx_cmd_v6 *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); @@ -577,7 +566,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { - u32 rate_n_flags = 0; + __le32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? 
iwl_mvm_sta_from_mac80211(sta) : NULL; @@ -609,9 +598,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, flags |= IWL_TX_FLAGS_HIGH_PRI; } - if (mvm->trans->trans_cfg->device_family >= + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd *cmd = (void *)dev_cmd->payload; u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, info, amsdu); @@ -624,9 +613,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le16(flags); - cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + cmd->rate_n_flags = rate_n_flags; } else { - struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v9 *cmd = (void *)dev_cmd->payload; u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, info, amsdu); @@ -639,12 +628,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); cmd->flags = cpu_to_le32(flags); - cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + cmd->rate_n_flags = rate_n_flags; } goto out; } - tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload; if (info->control.hw_key) iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); @@ -1023,7 +1012,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > - mvm->trans->max_skb_frags) + mvm->trans->info.max_skb_frags) num_subframes = 1; if (num_subframes > 1) @@ -1185,7 +1174,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, seq_number &= IEEE80211_SCTL_SEQ; if (!iwl_mvm_has_new_tx_api(mvm)) { - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); @@ -1387,7 +1376,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; - if (mvm->trans->trans_cfg->gen2) + if (mvm->trans->mac_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) @@ -1467,7 +1456,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, struct ieee80211_tx_rate *r) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - u32 rate = format == RATE_MCS_HT_MSK ? + u32 rate = format == RATE_MCS_MOD_TYPE_HT ? 
RATE_HT_MCS_INDEX(rate_n_flags) : rate_n_flags & RATE_MCS_CODE_MSK; @@ -1477,68 +1466,51 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; - if (format == RATE_MCS_HT_MSK) { + switch (format) { + case RATE_MCS_MOD_TYPE_HT: r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate; - } else if (format == RATE_MCS_VHT_MSK) { + break; + case RATE_MCS_MOD_TYPE_VHT: ieee80211_rate_set_vht(r, rate, FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; - } else if (format == RATE_MCS_HE_MSK) { + break; + case RATE_MCS_MOD_TYPE_HE: + case RATE_MCS_MOD_TYPE_EHT: /* mac80211 cannot do this without ieee80211_tx_status_ext() * but it only matters for radiotap */ r->idx = 0; - } else { + break; + default: r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, band); } } -void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, - enum nl80211_band band, - struct ieee80211_tx_rate *r) -{ - if (rate_n_flags & RATE_HT_MCS_GF_MSK) - r->flags |= IEEE80211_TX_RC_GREEN_FIELD; - - r->flags |= - iwl_mvm_get_hwrate_chan_width(rate_n_flags & - RATE_MCS_CHAN_WIDTH_MSK_V1); - - if (rate_n_flags & RATE_MCS_SGI_MSK_V1) - r->flags |= IEEE80211_TX_RC_SHORT_GI; - if (rate_n_flags & RATE_MCS_HT_MSK_V1) { - r->flags |= IEEE80211_TX_RC_MCS; - r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; - } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { - ieee80211_rate_set_vht( - r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, - FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1); - r->flags |= IEEE80211_TX_RC_VHT_MCS; - } else { - r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, - band); - } -} - /* * translate ucode response to mac80211 tx status control values */ -static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, - u32 rate_n_flags, +static void iwl_mvm_hwrate_to_tx_status(struct iwl_mvm *mvm, + __le32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; + u32 rate; - if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, - TX_CMD, 0) <= 6) - rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + /* + * Technically this conversion is incorrect for BA status, however: + * - we only use the BA notif data for older firmware that have + * host rate scaling and don't use newer rate formats + * - the firmware API changed together for BA notif and TX CMD + * as well + */ + rate = iwl_mvm_v3_rate_from_fw(rate_n_flags, mvm->fw_rates_ver); info->status.antenna = - ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); - iwl_mvm_hwrate_to_tx_rate(rate_n_flags, - info->band, r); + ((rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); + iwl_mvm_hwrate_to_tx_rate(rate, info->band, r); } static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, @@ -1603,7 +1575,7 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return val & 0xFFFF; return val & 0xFFF; } @@ -1690,9 +1662,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->status.rates[0].count = tx_resp->failure_frame + 1; - iwl_mvm_hwrate_to_tx_status(mvm->fw, - le32_to_cpu(tx_resp->initial_rate), - info); + iwl_mvm_hwrate_to_tx_status(mvm, tx_resp->initial_rate, info); /* Don't assign the converted initial_rate, because driver * TLC uses this and doesn't support the new FW 
rate @@ -1934,7 +1904,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = - le32_to_cpu(tx_resp->initial_rate); + tx_resp->initial_rate; mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = @@ -1959,7 +1929,7 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int txq, int index, - struct ieee80211_tx_info *tx_info, u32 rate, + struct ieee80211_tx_info *tx_info, __le32 rate, bool is_flush) { struct sk_buff_head reclaimed_skbs; @@ -2043,7 +2013,9 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, tx_info->status.status_driver_data[0] = RS_DRV_DATA_PACK(tid_data->lq_color, tx_info->status.status_driver_data[0]); - tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; + /* the value is only consumed for old FW that has v1 rates anyway */ + tx_info->status.status_driver_data[1] = + (void *)(uintptr_t)le32_to_cpu(rate); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; @@ -2062,7 +2034,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &tx_info->status, sizeof(tx_info->status)); - iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); + iwl_mvm_hwrate_to_tx_status(mvm, rate, info); } } @@ -2085,7 +2057,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, goto out; tx_info->band = chanctx_conf->def.chan->band; - iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); + iwl_mvm_hwrate_to_tx_status(mvm, rate, tx_info); IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); @@ -2174,7 +2146,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, - le32_to_cpu(ba_res->tx_rate), false); + ba_res->tx_rate, false); } if (mvmsta) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index dd890dcd1505..62da0132f383 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -142,7 +142,7 @@ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; bool is_LB = band == NL80211_BAND_2GHZ; - if (format == RATE_MCS_LEGACY_OFDM_MSK) + if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) return is_LB ? rate + IWL_FIRST_OFDM_RATE : rate; @@ -169,15 +169,9 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx) { - if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8) - /* In the new rate legacy rates are indexed: - * 0 - 3 for CCK and 0 - 7 for OFDM. - */ - return (rate_idx >= IWL_FIRST_OFDM_RATE ? - rate_idx - IWL_FIRST_OFDM_RATE : - rate_idx); - - return iwl_fw_rate_idx_to_plcp(rate_idx); + return (rate_idx >= IWL_FIRST_OFDM_RATE ? 
+ rate_idx - IWL_FIRST_OFDM_RATE : + rate_idx); } u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac) @@ -748,7 +742,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { unsigned int default_timeout = - mvm->trans->trans_cfg->base_params->wd_timeout; + mvm->trans->mac_cfg->base->wd_timeout; /* * We can't know when the station is asleep or awake, so we @@ -1187,9 +1181,9 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) { u32 reg_addr = DEVICE_SYSTEM_TIME_REG; - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && - mvm->trans->cfg->gp2_reg_addr) - reg_addr = mvm->trans->cfg->gp2_reg_addr; + if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && + mvm->trans->mac_cfg->base->gp2_reg_addr) + reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr; return iwl_read_prph(mvm->trans, reg_addr); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c index 4f367c7fce25..976fd1f58da4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c @@ -5,7 +5,7 @@ #include <linux/dmi.h> #include "iwl-trans.h" #include "iwl-fh.h" -#include "iwl-context-info-gen3.h" +#include "iwl-context-info-v2.h" #include "internal.h" #include "iwl-prph.h" @@ -97,11 +97,12 @@ out: *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags; } -int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, - const struct fw_img *fw) +int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct fw_img *img) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_context_info_gen3 *ctxt_info_gen3; + struct iwl_context_info_v2 *ctxt_info_v2; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; @@ -109,9 +110,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, u32 control_flags_ext = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, - trans->cfg->min_txq_size); + trans->mac_cfg->base->min_txq_size); - switch (trans_pcie->rx_buf_size) { + switch (trans->conf.rx_buf_size) { case IWL_AMSDU_DEF: return -EINVAL; case IWL_AMSDU_2K: @@ -131,13 +132,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, break; } - if (trans->dsbr_urm_fw_dependent) + if (trans->conf.dsbr_urm_fw_dependent) control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW; - if (trans->dsbr_urm_permanent) + if (trans->conf.dsbr_urm_permanent) control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM; - if (trans->ext_32khz_clock_valid) + if (trans->conf.ext_32khz_clock_valid) control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID; /* Allocate prph scratch */ @@ -151,16 +152,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = - cpu_to_le16((u16)trans->hw_rev); + cpu_to_le16((u16)trans->info.hw_rev); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags |= IWL_PRPH_SCRATCH_MTR_MODE; control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT; - if (trans->trans_cfg->imr_enabled) + if (trans->mac_cfg->imr_enabled) control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; - if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL && iwl_is_force_scu_active_approved()) { control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE; 
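Editor's note: a minimal sketch (not part of the patch) of the legacy-rate indexing that the simplified iwl_mvm_mac80211_idx_to_hwrate() hunk above now assumes for all supported firmware: CCK rates keep indices 0-3 and OFDM rates are re-based to 0-7. The IWL_FIRST_OFDM_RATE value of 4 is an assumption for this illustration, matching the driver's usual rate-table ordering.
```c
/*
 * Illustrative only: mirrors the simplified index conversion in
 * iwl_mvm_mac80211_idx_to_hwrate(). IWL_FIRST_OFDM_RATE = 4 is assumed
 * for this sketch (CCK indices 0-3, OFDM starting at 4).
 */
#include <stdio.h>

#define IWL_FIRST_OFDM_RATE 4

static unsigned int mac80211_idx_to_fw_legacy(unsigned int rate_idx)
{
	/* new scheme: CCK stays 0-3, OFDM is re-based to 0-7 */
	return rate_idx >= IWL_FIRST_OFDM_RATE ?
	       rate_idx - IWL_FIRST_OFDM_RATE : rate_idx;
}

int main(void)
{
	/* 11 Mbps CCK (idx 3) -> 3; 6 Mbps OFDM (idx 4) -> 0 */
	printf("%u %u\n",
	       mac80211_idx_to_fw_legacy(3),
	       mac80211_idx_to_fw_legacy(4));
	return 0;
}
```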
IWL_DEBUG_FW(trans, @@ -168,6 +169,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE); } + if (trans->do_top_reset) { + WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC); + control_flags |= IWL_PRPH_SCRATCH_TOP_RESET; + } + /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); @@ -178,15 +184,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext); /* initialize the Step equalizer data */ - prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step); - prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step); + prph_sc_ctrl->step_cfg.mbx_addr_0 = + cpu_to_le32(trans->conf.mbx_addr_0_step); + prph_sc_ctrl->step_cfg.mbx_addr_1 = + cpu_to_le32(trans->conf.mbx_addr_1_step); /* allocate ucode sections in dram and set addresses */ - ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram.common); + ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common); if (ret) goto err_free_prph_scratch; - /* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later @@ -206,18 +213,18 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, } /* Allocate context info */ - ctxt_info_gen3 = dma_alloc_coherent(trans->dev, - sizeof(*ctxt_info_gen3), - &trans_pcie->ctxt_info_dma_addr, - GFP_KERNEL); - if (!ctxt_info_gen3) { + ctxt_info_v2 = dma_alloc_coherent(trans->dev, + sizeof(*ctxt_info_v2), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info_v2) { ret = -ENOMEM; goto err_free_prph_info; } - ctxt_info_gen3->prph_info_base_addr = + ctxt_info_v2->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); - ctxt_info_gen3->prph_scratch_base_addr = + ctxt_info_v2->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr); /* @@ -229,34 +236,35 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, sizeof(prph_scratch->dram.fseq_img) != sizeof(*prph_scratch)); if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ) - ctxt_info_gen3->prph_scratch_size = + ctxt_info_v2->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch)); else - ctxt_info_gen3->prph_scratch_size = + ctxt_info_v2->prph_scratch_size = cpu_to_le32(offsetofend(typeof(*prph_scratch), dram.common)); - ctxt_info_gen3->cr_head_idx_arr_base_addr = + ctxt_info_v2->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); - ctxt_info_gen3->tr_tail_idx_arr_base_addr = + ctxt_info_v2->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2); - ctxt_info_gen3->cr_tail_idx_arr_base_addr = + ctxt_info_v2->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); - ctxt_info_gen3->mtr_base_addr = - cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); - ctxt_info_gen3->mcr_base_addr = + ctxt_info_v2->mtr_base_addr = + cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr); + ctxt_info_v2->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); - ctxt_info_gen3->mtr_size = + ctxt_info_v2->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); - ctxt_info_gen3->mcr_size = - cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds)); + ctxt_info_v2->mcr_size = + cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans))); - trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; + trans_pcie->ctxt_info_v2 = ctxt_info_v2; 
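Editor's note: the context-info hunk just above advertises either the full prph scratch or only the portion up to and including dram.common, using offsetofend(). Below is a standalone sketch of that sizing idiom with a stand-in structure layout; the real iwl_prph_scratch layout is not reproduced here, and the rationale (presumably keeping firmware without the extended FSEQ image from being told about fields it does not use) is an inference, not a statement from the patch.
```c
/*
 * Sketch of the offsetofend() sizing used above. The structure below is a
 * stand-in, not the driver's real iwl_prph_scratch.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct demo_prph_scratch {
	struct {
		uint64_t common[4];     /* always consumed by firmware */
		uint64_t fseq_img[16];  /* only with the extended FSEQ image */
	} dram;
};

int main(void)
{
	printf("full size: %zu bytes\n", sizeof(struct demo_prph_scratch));
	printf("up to dram.common: %zu bytes\n",
	       offsetofend(struct demo_prph_scratch, dram.common));
	return 0;
}
```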
trans_pcie->prph_info = prph_info; trans_pcie->prph_scratch = prph_scratch; /* Allocate IML */ - trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len, + trans_pcie->iml_len = fw->iml_len; + trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); if (!trans_pcie->iml) { @@ -264,27 +272,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, goto err_free_ctxt_info; } - memcpy(trans_pcie->iml, trans->iml, trans->iml_len); - - iwl_enable_fw_load_int_ctx_info(trans); - - /* kick FW self load */ - iwl_write64(trans, CSR_CTXT_INFO_ADDR, - trans_pcie->ctxt_info_dma_addr); - iwl_write64(trans, CSR_IML_DATA_ADDR, - trans_pcie->iml_dma_addr); - iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); - - iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, - CSR_AUTO_FUNC_BOOT_ENA); + memcpy(trans_pcie->iml, fw->iml, fw->iml_len); return 0; err_free_ctxt_info: - dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), - trans_pcie->ctxt_info_gen3, + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2), + trans_pcie->ctxt_info_v2, trans_pcie->ctxt_info_dma_addr); - trans_pcie->ctxt_info_gen3 = NULL; + trans_pcie->ctxt_info_v2 = NULL; err_free_prph_info: dma_free_coherent(trans->dev, PAGE_SIZE, prph_info, trans_pcie->prph_info_dma_addr); @@ -298,14 +294,31 @@ err_free_prph_scratch: } -void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) +void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); + iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); + iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len); + + iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, + CSR_AUTO_FUNC_BOOT_ENA); +} + +void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->iml) { - dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml, + dma_free_coherent(trans->dev, trans_pcie->iml_len, + trans_pcie->iml, trans_pcie->iml_dma_addr); trans_pcie->iml_dma_addr = 0; + trans_pcie->iml_len = 0; trans_pcie->iml = NULL; } @@ -314,15 +327,15 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) if (alive) return; - if (!trans_pcie->ctxt_info_gen3) + if (!trans_pcie->ctxt_info_v2) return; - /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */ - dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), - trans_pcie->ctxt_info_gen3, + /* ctxt_info_v2 and prph_scratch are still needed for PNVM load */ + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2), + trans_pcie->ctxt_info_v2, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; - trans_pcie->ctxt_info_gen3 = NULL; + trans_pcie->ctxt_info_v2 = NULL; dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch, @@ -337,9 +350,9 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) trans_pcie->prph_info = NULL; } -static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_data, - struct iwl_dram_data *dram) +static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + struct iwl_dram_data *dram) { u32 len, len0, len1; @@ -426,9 
+439,9 @@ static int iwl_pcie_load_payloads_segments } -int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_payloads, - const struct iwl_ucode_capabilities *capa) +int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -443,7 +456,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) return -EBUSY; - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; if (!pnvm_payloads->n_chunks) { @@ -460,10 +473,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, trans->pnvm_loaded = true; } else { /* save only in one DRAM section */ - ret = iwl_pcie_load_payloads_continuously - (trans, - pnvm_payloads, - &dram_regions->drams[0]); + ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads, + &dram_regions->drams[0]); if (!ret) { dram_regions->n_regions = 1; trans->pnvm_loaded = true; @@ -498,7 +509,7 @@ static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans) cpu_to_le32(iwl_dram_regions_size(dram_regions)); } -static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans) +static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -510,21 +521,21 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans) cpu_to_le32(trans_pcie->pnvm_data.drams[0].size); } -void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) +void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) { - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return; if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) iwl_pcie_set_pnvm_segments(trans); else - iwl_pcie_set_continuous_pnvm(trans); + iwl_pcie_set_contig_pnvm(trans); } -int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct iwl_ucode_capabilities *capa) +int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -536,7 +547,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, if (trans->reduce_power_loaded) return 0; - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) @@ -556,10 +567,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, trans->reduce_power_loaded = true; } else { /* save only in one DRAM section */ - ret = iwl_pcie_load_payloads_continuously - (trans, - payloads, - &dram_regions->drams[0]); + ret = iwl_pcie_load_payloads_contig(trans, payloads, + &dram_regions->drams[0]); if (!ret) { dram_regions->n_regions = 1; trans->reduce_power_loaded = true; @@ -582,7 +591,7 @@ static void 
iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans) cpu_to_le32(iwl_dram_regions_size(dram_regions)); } -static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans) +static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = @@ -595,15 +604,15 @@ static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans) } void -iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) +iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) { - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return; if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) iwl_pcie_set_reduce_power_segments(trans); else - iwl_pcie_set_continuous_reduce_power(trans); + iwl_pcie_set_contig_reduce_power(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 3f0256b3565d..cb36baac14da 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -161,7 +161,7 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans, } int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, - const struct fw_img *fw) + const struct fw_img *img) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info *ctxt_info; @@ -180,11 +180,11 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->version.version = 0; ctxt_info->version.mac_id = - cpu_to_le16((u16)trans->hw_rev); + cpu_to_le16((u16)trans->info.hw_rev); /* size is in DWs */ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); - switch (trans_pcie->rx_buf_size) { + switch (trans->conf.rx_buf_size) { case IWL_AMSDU_2K: rb_size = IWL_CTXT_INFO_RB_SIZE_2K; break; @@ -202,10 +202,10 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, rb_size = IWL_CTXT_INFO_RB_SIZE_4K; } - WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12); + WARN_ON(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)) > 12); control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG; control_flags |= - u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds), + u32_encode_bits(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)), IWL_CTXT_INFO_RB_CB_SIZE); control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE); ctxt_info->control.control_flags = cpu_to_le32(control_flags); @@ -218,12 +218,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = - cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); + cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); /* allocate ucode sections in dram and set addresses */ - ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); + ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info, trans_pcie->ctxt_info_dma_addr); @@ -232,7 +232,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, trans_pcie->ctxt_info = ctxt_info; - iwl_enable_fw_load_int_ctx_info(trans); + iwl_enable_fw_load_int_ctx_info(trans, false); /* Configure debug, if exists */ if (iwl_pcie_dbg_on(trans)) diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 00056e76ea3d..656f8b06c27b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -17,15 +17,13 @@ #include "iwl-prph.h" #include "internal.h" -#define TRANS_CFG_MARKER BIT(0) #define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ struct _struct) extern int _invalid_type; -#define _TRANS_CFG_MARKER(cfg) \ - (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ - TRANS_CFG_MARKER, \ - __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) -#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) +#define _TRANS_CFG_CHECK(cfg) \ + (__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg), \ + 0, _invalid_type)) +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_CHECK(cfg) + (kernel_ulong_t)&(cfg)) #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ @@ -35,515 +33,518 @@ extern int _invalid_type; /* Hardware specific file defines the PCI IDs table for that hardware module */ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) - {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, 
iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5000_mac_cfg)}, /* Half Mini Card */ /* 5300 Series WiFi */ - {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5000_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5000_mac_cfg)}, /* Half Mini Card */ /* 5350 Series WiFi/WiMax */ - {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, 
iwl5000_mac_cfg)}, /* Mini Card */ /* 5150 Series Wifi/WiMax */ - {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ - - {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_mac_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_mac_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_mac_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_mac_cfg)}, /* Half Mini Card */ /* 6x00 Series */ - {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_mac_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_mac_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_mac_cfg)}, /* 6x05 Series */ - {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1318, 
iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ - {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ + {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_mac_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */ + {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */ /* 6x30 Series */ - {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_mac_cfg)}, /* 6x50 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_mac_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_mac_cfg)}, + 
{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_mac_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_mac_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_mac_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_mac_cfg)}, /* 6150 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_mac_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_mac_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_mac_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_mac_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_mac_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_mac_cfg)}, /* 1000 Series WiFi */ - {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_mac_cfg)}, /* 100 Series WiFi */ - {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)}, /* 130 Series WiFi */ - {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)}, /* 2x00 Series */ - {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, - 
{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_mac_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_mac_cfg)}, /* 2x30 Series */ - {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_mac_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_mac_cfg)}, /* 6x35 Series */ - {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6030_mac_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6030_mac_cfg)}, /* 105 Series */ - {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_mac_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_mac_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_mac_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_mac_cfg)}, /* 135 Series */ - {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_mac_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_mac_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_mac_cfg)}, #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) /* 7260 Series */ - {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, - {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7000_mac_cfg)}, + 
{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7000_mac_cfg)}, /* 3160 Series */ - {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x1070, 
iwl3160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl7000_mac_cfg)}, /* 3165 Series */ - {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4012, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4410, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4510, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4110, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4310, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4210, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl7000_mac_cfg)}, /* 3168 Series */ - {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl7000_mac_cfg)}, /* 7265 Series */ - {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5510, 
iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5590, 
iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7000_mac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7000_mac_cfg)}, /* 8000 Series */ - {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, - {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl8000_mac_cfg)}, + 
{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xD030, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1431, iwl8000_mac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x1432, iwl8000_mac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)}, - {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)}, - {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)}, - {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)}, - {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)}, + {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_mac_cfg)}, + {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_mac_cfg)}, + {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_mac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_mac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_mac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_mac_cfg)}, + {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_mac_cfg)}, /* Qu devices */ - {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, - {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, + {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_mac_cfg)}, + {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_mac_cfg)}, - {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)}, + 
{IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)}, - {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)}, - {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)}, + {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_mac_cfg)}, -/* So devices */ - {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)}, - {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, - {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, - {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, - {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, +/* Ty/So devices */ + {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_ty_mac_cfg)}, + {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)}, + {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_mac_cfg)}, + {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)}, + {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)}, + {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_mac_cfg)}, /* Ma devices */ - {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, - {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, + {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_mac_cfg)}, + {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_mac_cfg)}, #endif /* CONFIG_IWLMVM */ #if IS_ENABLED(CONFIG_IWLMLD) /* Bz devices */ - {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, - 
{IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1775, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1776, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_mac_cfg)}, + {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_mac_cfg)}, /* Sc devices */ - {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)}, - {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)}, - {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)}, - {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)}, - -/* Dr devices */ - {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)}, + {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_mac_cfg)}, + {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_mac_cfg)}, + {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_mac_cfg)}, + {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_mac_cfg)}, #endif /* CONFIG_IWLMLD */ {0} @@ -551,689 +552,518 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); -#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ - { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ - .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ - .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ - .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } - -#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ - _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, _cfg, _name) +#define _IWL_DEV_INFO(_cfg, _name, ...) 
{ \ + .cfg = &_cfg, \ + .name = _name, \ + .device = IWL_CFG_ANY, \ + .subdevice = IWL_CFG_ANY, \ + .subdevice_m_h = 15, \ + __VA_ARGS__ \ +} +#define IWL_DEV_INFO(_cfg, _name, ...) \ + _IWL_DEV_INFO(_cfg, _name, __VA_ARGS__) + +#define DEVICE(n) .device = (n) +#define SUBDEV(n) .subdevice = (n) +#define _LOWEST_BIT(n) (__builtin_ffs(n) - 1) +#define _BIT_ABOVE_MASK(n) ((n) + (1 << _LOWEST_BIT(n))) +#define _HIGHEST_BIT(n) (__builtin_ffs(_BIT_ABOVE_MASK(n)) - 2) +#define _IS_POW2(n) (((n) & ((n) - 1)) == 0) +#define _IS_CONTIG(n) _IS_POW2(_BIT_ABOVE_MASK(n)) +#define _CHECK_MASK(m) BUILD_BUG_ON_ZERO(!_IS_CONTIG(m)) +#define SUBDEV_MASKED(v, m) .subdevice = (v) + _CHECK_MASK(m), \ + .subdevice_m_l = _LOWEST_BIT(m), \ + .subdevice_m_h = _HIGHEST_BIT(m) +#define RF_TYPE(n) .match_rf_type = 1, \ + .rf_type = IWL_CFG_RF_TYPE_##n +#define RF_STEP(n) .match_rf_step = 1, \ + .rf_step = SILICON_##n##_STEP +#define RF_ID(n) .match_rf_id = 1, \ + .rf_id = IWL_CFG_RF_ID_##n +#define NO_CDB .match_cdb = 1, .cdb = 0 +#define CDB .match_cdb = 1, .cdb = 1 +#define BW_NOT_LIMITED .match_bw_limit = 1, .bw_limit = 0 +#define BW_LIMITED .match_bw_limit = 1, .bw_limit = 1 VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { +#if IS_ENABLED(CONFIG_IWLDVM) + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name, + DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name, + DEVICE(0x4232), SUBDEV_MASKED(0x4, 0xF)), + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name, + DEVICE(0x4232), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name, + DEVICE(0x4232), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name, + DEVICE(0x4237), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name, + DEVICE(0x4237), SUBDEV_MASKED(0x4, 0xF)), + IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name, + DEVICE(0x4237), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name, + DEVICE(0x4237), SUBDEV_MASKED(0x6, 0xF)), + +/* 5300 Series WiFi */ + IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name, + DEVICE(0x4235), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name, + DEVICE(0x4235), SUBDEV_MASKED(0x4, 0xF)), + IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name, + DEVICE(0x4236), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name, + DEVICE(0x4236), SUBDEV_MASKED(0x4, 0xF)), + +/* 5350 Series WiFi/WiMax */ + IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name, + DEVICE(0x423A)), + IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name, + DEVICE(0x423B)), + +/* 5150 Series Wifi/WiMax */ + IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name, + DEVICE(0x423C), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name, + DEVICE(0x423C), SUBDEV_MASKED(0x6, 0xF)), + + IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name, + DEVICE(0x423D), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name, + DEVICE(0x423D), SUBDEV_MASKED(0x6, 0xF)), + +/* 6x00 Series */ + IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name, + DEVICE(0x422B), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name, + DEVICE(0x422B), SUBDEV_MASKED(0x8, 0xF)), + IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name, + DEVICE(0x422C), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name, + DEVICE(0x422C), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2bg_name, + DEVICE(0x422C), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl6000_3agn_cfg, 
iwl6000_3agn_name, + DEVICE(0x4238), SUBDEV(0x1111)), + IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name, + DEVICE(0x4238), SUBDEV(0x1118)), + IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name, + DEVICE(0x4239), SUBDEV(0x1311)), + IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name, + DEVICE(0x4239), SUBDEV(0x1316)), + +/* 6x05 Series */ + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name, + DEVICE(0x0082), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name, + DEVICE(0x0082), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2bg_name, + DEVICE(0x0082), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name, + DEVICE(0x0082), SUBDEV_MASKED(0x8, 0xF)), + + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name, + DEVICE(0x0085), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name, + DEVICE(0x0085), SUBDEV_MASKED(0x8, 0xF)), + IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name, + DEVICE(0x0085), SUBDEV_MASKED(0x6, 0xF)), + + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name, + DEVICE(0x0082), SUBDEV_MASKED(0xC000, 0xF000)), + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_d_name, + DEVICE(0x0082), SUBDEV(0x4820)), + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow1_name, + DEVICE(0x0082), SUBDEV(0x1304)),/* low 5GHz active */ + IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow2_name, + DEVICE(0x0082), SUBDEV(0x1305)),/* high 5GHz active */ + +/* 6x30 Series */ + IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name, + DEVICE(0x008A), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name, + DEVICE(0x008A), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name, + DEVICE(0x008B), SUBDEV(0x5315)), + IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name, + DEVICE(0x008B), SUBDEV(0x5317)), + IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name, + DEVICE(0x0090), SUBDEV(0x5211)), + IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name, + DEVICE(0x0090), SUBDEV(0x5215)), + IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name, + DEVICE(0x0090), SUBDEV(0x5216)), + IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name, + DEVICE(0x0091), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name, + DEVICE(0x0091), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name, + DEVICE(0x0091), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2bg_name, + DEVICE(0x0091), SUBDEV(0x5207)), + +/* 6x50 WiFi/WiMax Series */ + IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name, + DEVICE(0x0087), SUBDEV_MASKED(0x1, 0xF)), + IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name, + DEVICE(0x0087), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name, + DEVICE(0x0089), SUBDEV(0x1311)), + IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name, + DEVICE(0x0089), SUBDEV(0x1316)), + +/* 6150 WiFi/WiMax Series */ + IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name, + DEVICE(0x0885), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name, + DEVICE(0x0885), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name, + DEVICE(0x0886), SUBDEV(0x1315)), + IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name, + DEVICE(0x0886), SUBDEV(0x1317)), + +/* 1000 Series WiFi */ + IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name, + DEVICE(0x0083), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name, + DEVICE(0x0083), SUBDEV_MASKED(0x6, 0xF)), + IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name, + DEVICE(0x0084), SUBDEV(0x1216)), + IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name, + 
DEVICE(0x0084), SUBDEV(0x1316)), + +/* 100 Series WiFi */ + IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name, + DEVICE(0x08AE), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name, + DEVICE(0x08AE), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name, + DEVICE(0x08AF), SUBDEV(0x1015)), + IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name, + DEVICE(0x08AF), SUBDEV(0x1017)), + +/* 130 Series WiFi */ + IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name, + DEVICE(0x0896), SUBDEV_MASKED(0x5, 0xF)), + IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name, + DEVICE(0x0896), SUBDEV_MASKED(0x7, 0xF)), + IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name, + DEVICE(0x0897), SUBDEV(0x5015)), + IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name, + DEVICE(0x0897), SUBDEV(0x5017)), + +/* 2x00 Series */ + IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name, + DEVICE(0x0890), SUBDEV(0x4022)), + IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name, + DEVICE(0x0891), SUBDEV(0x4222)), + IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name, + DEVICE(0x0890), SUBDEV(0x4422)), + IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_d_name, + DEVICE(0x0890), SUBDEV(0x4822)), + +/* 2x30 Series */ + IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name, + DEVICE(0x0887)), + IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name, + DEVICE(0x0888), SUBDEV(0x4262)), + +/* 6x35 Series */ + IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name, + DEVICE(0x088E), SUBDEV_MASKED(0x0, 0xF)), + IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name, + DEVICE(0x088E), SUBDEV_MASKED(0xA, 0xF)), + IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name, + DEVICE(0x088F), SUBDEV_MASKED(0x0, 0xF)), + IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name, + DEVICE(0x088F), SUBDEV_MASKED(0xA, 0xF)), + +/* 105 Series */ + IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name, + DEVICE(0x0894)), + IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name, + DEVICE(0x0895), SUBDEV(0x0222)), + +/* 135 Series */ + IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name, + DEVICE(0x0892)), + IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name, + DEVICE(0x0893), SUBDEV(0x0262)), +#endif /* CONFIG_IWLDVM */ + #if IS_ENABLED(CONFIG_IWLMVM) -/* 9000 */ - IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name), - IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), - IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), - IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), - IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), - IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), - IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), - IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), - IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), - IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), - IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), - IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), - IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), - IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - 
IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - - IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), - IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name), - -/* AX200 */ - IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), - IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name), - IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), - - /* Qu with Hr */ - IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), - IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), - IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0078, 
iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - - IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - - IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), - IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), - - /* So with HR */ - IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), - IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), - IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), - IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), - IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), - IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), - IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL), - IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL), - IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL), - IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), - IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL), - IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL), - IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL), - IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL), - IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL), - - /* So with JF */ - IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), - IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, 
iwl9560_killer_1550i_160_name), - IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), - IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), - - /* SO with GF2 */ - IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), - - /* MA with GF2 */ - IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_2ac_cfg_soc, iwl9560_name), - - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9270_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9270_name), - - _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, 
IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9162_160_name), - _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9162_name), - - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9260_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9260_2ac_cfg, iwl9260_name), - -/* Qu with Jf */ - /* Qu B step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), - - /* Qu C step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), - 
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), - - /* QuZ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), - -/* Qu with Hr */ - /* Qu B step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_qu_b0_hr1_b0, iwl_ax101_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_qu_b0_hr_b0, iwl_ax203_name), - - /* Qu C step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_qu_c0_hr1_b0, iwl_ax101_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - 
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_qu_c0_hr_b0, iwl_ax203_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_qu_c0_hr_b0, iwl_ax201_name), - - /* QuZ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_quz_a0_hr1_b0, iwl_ax101_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), - -/* Ma */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax201_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_ma, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax231_name), - -/* So with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax203_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax101_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax201_name), - -/* So-F with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax203_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax101_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_hr_a0, iwl_ax201_name), - -/* So-F with Gf */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, - iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), - -/* SoF with JF2 */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, 
IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), - -/* SoF with JF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), - -/* So with GF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, - iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), - -/* So with JF2 */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), - -/* So with JF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), +/* 7260 Series */ + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B1)), // unlisted ones fall through to here + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4060)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x406A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4160)), + 
IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0x4062)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0x4162)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4460)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x446A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0x4462)), + IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name, + DEVICE(0x08B1), SUBDEV(0x4A70)), + IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name, + DEVICE(0x08B1), SUBDEV(0x4A6E)), + IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name, + DEVICE(0x08B1), SUBDEV(0x4A6C)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4560)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4020)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x402A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0x4420)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC060)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC06A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC160)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0xC062)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0xC162)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC760)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC460)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B1), SUBDEV(0xC462)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC560)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC360)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC020)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC02A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B1), SUBDEV(0xC420)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0x4270)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0x4272)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0x4260)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0x426A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B2), SUBDEV(0x4262)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0x4370)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0x4360)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0x4220)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0xC270)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0xC272)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0xC260)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B2), SUBDEV(0xC26A)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name, + DEVICE(0x08B2), SUBDEV(0xC262)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name, + DEVICE(0x08B2), SUBDEV(0xC370)), + IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name, + DEVICE(0x08B2), SUBDEV(0xC220)), +/* 3160 Series */ + IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name, + DEVICE(0x08B3)), + + IWL_DEV_INFO(iwl3160_cfg, iwl3160_n_name, + DEVICE(0x08B3), SUBDEV_MASKED(0x62, 0xFF)), + IWL_DEV_INFO(iwl3160_cfg, iwl3160_2n_name, + DEVICE(0x08B3), SUBDEV_MASKED(0x60, 0xFF)), + + IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name, + 
DEVICE(0x08B4)), + +/* 3165 Series */ + IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name, + DEVICE(0x3165)), + IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name, + DEVICE(0x3166)), + +/* 3168 Series */ + IWL_DEV_INFO(iwl3168_2ac_cfg, iwl3168_2ac_name, + DEVICE(0x24FB)), + +/* 7265 Series */ + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name, + DEVICE(0x095A)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5000)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x500A)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, + DEVICE(0x095A), SUBDEV(0x5002)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, + DEVICE(0x095A), SUBDEV(0x5102)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5020)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x502A)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5090)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5190)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5100)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5400)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5420)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5490)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5C10)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x5590)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x9000)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x900A)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095A), SUBDEV(0x9400)), + + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name, + DEVICE(0x095B)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095B), SUBDEV(0x520A)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, + DEVICE(0x095B), SUBDEV(0x5302)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095B), SUBDEV(0x5200)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name, + DEVICE(0x095B), SUBDEV(0x5202)), + IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name, + DEVICE(0x095B), SUBDEV(0x9200)), + +/* 8000 Series */ + IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name, + DEVICE(0x24F3)), + IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name, + DEVICE(0x24F3), SUBDEV(0x0004)), + IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name, + DEVICE(0x24F3), SUBDEV(0x0044)), + IWL_DEV_INFO(iwl8265_cfg, iwl8265_2ac_name, + DEVICE(0x24FD)), + IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, + DEVICE(0x24FD), SUBDEV(0x3E02)), + IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, + DEVICE(0x24FD), SUBDEV(0x3E01)), + IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, + DEVICE(0x24FD), SUBDEV(0x1012)), + IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name, + DEVICE(0x24FD), SUBDEV(0x0012)), + IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1435i_name, + DEVICE(0x24FD), SUBDEV(0x1431)), + IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1434_kix_name, + DEVICE(0x24FD), SUBDEV(0x1432)), + +/* JF1 RF */ + IWL_DEV_INFO(iwl_rf_jf, iwl9461_160_name, + RF_TYPE(JF1)), + IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9461_name, + RF_TYPE(JF1), BW_LIMITED), + IWL_DEV_INFO(iwl_rf_jf, iwl9462_160_name, + RF_TYPE(JF1), RF_ID(JF1_DIV)), + IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9462_name, + RF_TYPE(JF1), RF_ID(JF1_DIV), BW_LIMITED), +/* JF2 RF */ + IWL_DEV_INFO(iwl_rf_jf, iwl9260_160_name, + RF_TYPE(JF2)), + IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9260_name, + RF_TYPE(JF2), BW_LIMITED), + IWL_DEV_INFO(iwl_rf_jf, iwl9560_160_name, + RF_TYPE(JF2), RF_ID(JF)), + 
IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9560_name, + RF_TYPE(JF2), RF_ID(JF), BW_LIMITED), + +/* HR RF */ + IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_name, RF_TYPE(HR2)), + IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax101_name, RF_TYPE(HR1)), + IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax203_name, RF_TYPE(HR2), BW_LIMITED), + IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_name, DEVICE(0x2723)), + +/* GF RF */ + IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_name, RF_TYPE(GF)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_name, RF_TYPE(GF), CDB), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_name, DEVICE(0x2725)), + +/* Killer CRFs */ + IWL_DEV_INFO(iwl_rf_jf, iwl9260_killer_1550_name, SUBDEV(0x1550)), + IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550s_name, SUBDEV(0x1551)), + IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550i_name, SUBDEV(0x1552)), + + IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650s_name, SUBDEV(0x1651)), + IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650i_name, SUBDEV(0x1652)), + + IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675s_name, SUBDEV(0x1671)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675i_name, SUBDEV(0x1672)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675w_name, SUBDEV(0x1673)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675x_name, SUBDEV(0x1674)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690s_name, SUBDEV(0x1691)), + IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690i_name, SUBDEV(0x1692)), + +/* Killer discrete */ + IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650w_name, + DEVICE(0x2723), SUBDEV(0x1653)), + IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650x_name, + DEVICE(0x2723), SUBDEV(0x1654)), #endif /* CONFIG_IWLMVM */ #if IS_ENABLED(CONFIG_IWLMLD) -/* Bz */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_ax201_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_ax211_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_fm_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_wh_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_ax201_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_ax211_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_fm_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_wh_name), - -/* Ga (Gl) */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_gl_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, 
IWL_CFG_ANY, - IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_mtp_name), - -/* Sc */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_fm_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_wh_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_fm_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_wh_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_ax211_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_fm_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_wh_name), - -/* Dr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_dr, iwl_dr_name), - -/* Br */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_br, iwl_br_name), +/* FM RF */ + IWL_DEV_INFO(iwl_rf_fm, iwl_be201_name, RF_TYPE(FM)), + IWL_DEV_INFO(iwl_rf_fm, iwl_be401_name, RF_TYPE(FM), CDB), + /* the discrete NICs got the RF B0, it's only for the name anyway */ + IWL_DEV_INFO(iwl_rf_fm, iwl_be200_name, RF_TYPE(FM), + DEVICE(0x272B), RF_STEP(B)), + IWL_DEV_INFO(iwl_rf_fm_160mhz, iwl_be202_name, + RF_TYPE(FM), BW_LIMITED), + +/* Killer CRFs */ + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750s_name, SUBDEV(0x1771)), + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750i_name, SUBDEV(0x1772)), + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790s_name, SUBDEV(0x1791)), + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790i_name, SUBDEV(0x1792)), + +/* Killer discrete */ + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750w_name, + DEVICE(0x272B), SUBDEV(0x1773)), + IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750x_name, + DEVICE(0x272B), SUBDEV(0x1774)), + +/* WH RF */ + IWL_DEV_INFO(iwl_rf_wh, iwl_be211_name, RF_TYPE(WH)), + IWL_DEV_INFO(iwl_rf_wh_160mhz, iwl_be213_name, RF_TYPE(WH), BW_LIMITED), + +/* PE RF */ + IWL_DEV_INFO(iwl_rf_pe, iwl_bn201_name, RF_TYPE(PE)), + IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), SUBDEV(0x0524)), + IWL_DEV_INFO(iwl_rf_pe, iwl_be221_name, RF_TYPE(PE), SUBDEV(0x0324)), + +/* Killer */ + IWL_DEV_INFO(iwl_rf_wh, 
iwl_killer_be1775s_name, SUBDEV(0x1776)), + IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775i_name, SUBDEV(0x1775)), + + IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850w2_name, SUBDEV(0x1851)), + IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850i_name, SUBDEV(0x1852)), #endif /* CONFIG_IWLMLD */ }; EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table); @@ -1246,13 +1076,15 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size); /* * Read rf id and cdb info from prph register and store it */ -static void get_crf_id(struct iwl_trans *iwl_trans) +static void get_crf_id(struct iwl_trans *iwl_trans, + struct iwl_trans_info *info) { u32 sd_reg_ver_addr; + u32 hw_wfpm_id; u32 val = 0; u8 step; - if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; else sd_reg_ver_addr = SD_REG_VER; @@ -1263,83 +1095,83 @@ static void get_crf_id(struct iwl_trans *iwl_trans) iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ - iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); /* Read cnv info */ - iwl_trans->hw_cnv_id = - iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); + info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); /* For BZ-W, take B step also when A step is indicated */ - if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) + if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) step = SILICON_B_STEP; /* In BZ, the MAC step must be read from the CNVI aux register */ - if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) { - step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id); + if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) { + step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id); /* For BZ-U, take B step also when A step is indicated */ - if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) == + if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) == CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) && step == SILICON_A_STEP) step = SILICON_B_STEP; } - if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ || - CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) { - iwl_trans->hw_rev_step = step; - iwl_trans->hw_rev |= step; + if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ || + CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) { + info->hw_rev_step = step; + info->hw_rev |= step; } /* Read cdb info (also contains the jacket info if needed in the future */ - iwl_trans->hw_wfpm_id = - iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); + hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n", - iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id, - iwl_trans->hw_wfpm_id); + info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id); } /* * In case that there is no OTP on the NIC, map the rf id and cdb info * from the prph registers. 
*/ -static int map_crf_id(struct iwl_trans *iwl_trans) +static int map_crf_id(struct iwl_trans *iwl_trans, + struct iwl_trans_info *info) { int ret = 0; - u32 val = iwl_trans->hw_crf_id; + u32 val = info->hw_crf_id; u32 step_id = REG_CRF_ID_STEP(val); u32 slave_id = REG_CRF_ID_SLAVE(val); - u32 jacket_id_cnv = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id); - u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id); - u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id); + u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id); + u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, + WFPM_OTP_CFG1_ADDR); + u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id); + u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id); /* Map between crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { case REG_CRF_ID_TYPE_JF_1: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12); break; case REG_CRF_ID_TYPE_JF_2: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); break; case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); break; case REG_CRF_ID_TYPE_HR_NONE_CDB: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); break; case REG_CRF_ID_TYPE_HR_CDB: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); break; case REG_CRF_ID_TYPE_GF: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); break; case REG_CRF_ID_TYPE_FM: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; case REG_CRF_ID_TYPE_WHP: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12); break; case REG_CRF_ID_TYPE_PE: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12); + info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12); break; default: ret = -EIO; @@ -1351,28 +1183,28 @@ static int map_crf_id(struct iwl_trans *iwl_trans) } /* Set Step-id */ - iwl_trans->hw_rf_id |= (step_id << 8); + info->hw_rf_id |= (step_id << 8); /* Set CDB capabilities */ if (cdb_id_wfpm || slave_id) { - iwl_trans->hw_rf_id += BIT(28); + info->hw_rf_id += BIT(28); IWL_INFO(iwl_trans, "Adding cdb to rf id\n"); } /* Set Jacket capabilities */ if (jacket_id_wfpm || jacket_id_cnv) { - iwl_trans->hw_rf_id += BIT(29); + info->hw_rf_id += BIT(29); IWL_INFO(iwl_trans, "Adding jacket to rf id\n"); } IWL_INFO(iwl_trans, "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n", - REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id); + REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id); IWL_INFO(iwl_trans, "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n", - cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id); + cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id); IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n", - jacket_id_cnv, iwl_trans->hw_cnv_id); + jacket_id_cnv, info->hw_cnv_id); out: return ret; @@ -1382,9 +1214,8 @@ out: #define PCI_CFG_RETRY_TIMEOUT 0x041 VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * -iwl_pci_find_dev_info(u16 device, u16 subsystem_device, - u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) +iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb, + u8 rf_id, u8 bw_limit, u8 rf_step) { int num_devices = 
ARRAY_SIZE(iwl_dev_info_table); int i; @@ -1394,49 +1225,32 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; + u16 subdevice_mask; if (dev_info->device != (u16)IWL_CFG_ANY && dev_info->device != device) continue; - if (dev_info->subdevice != (u16)IWL_CFG_ANY && - dev_info->subdevice != subsystem_device) - continue; - - if (dev_info->mac_type != (u16)IWL_CFG_ANY && - dev_info->mac_type != mac_type) - continue; - - if (dev_info->mac_step != (u8)IWL_CFG_ANY && - dev_info->mac_step != mac_step) - continue; - - if (dev_info->rf_type != (u16)IWL_CFG_ANY && - dev_info->rf_type != rf_type) - continue; + subdevice_mask = GENMASK(dev_info->subdevice_m_h, + dev_info->subdevice_m_l); - if (dev_info->cdb != (u8)IWL_CFG_ANY && - dev_info->cdb != cdb) + if (dev_info->subdevice != (u16)IWL_CFG_ANY && + dev_info->subdevice != (subsystem_device & subdevice_mask)) continue; - if (dev_info->jacket != (u8)IWL_CFG_ANY && - dev_info->jacket != jacket) + if (dev_info->match_rf_type && dev_info->rf_type != rf_type) continue; - if (dev_info->rf_id != (u8)IWL_CFG_ANY && - dev_info->rf_id != rf_id) + if (dev_info->match_cdb && dev_info->cdb != cdb) continue; - if (dev_info->no_160 != (u8)IWL_CFG_ANY && - dev_info->no_160 != no_160) + if (dev_info->match_rf_id && dev_info->rf_id != rf_id) continue; - if (dev_info->cores != (u8)IWL_CFG_ANY && - dev_info->cores != cores) + if (dev_info->match_bw_limit && dev_info->bw_limit != bw_limit) continue; - if (dev_info->rf_step != (u8)IWL_CFG_ANY && - dev_info->rf_step != rf_step) + if (dev_info->match_rf_step && dev_info->rf_step != rf_step) continue; return dev_info; @@ -1448,30 +1262,32 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info); static void iwl_pcie_recheck_me_status(struct work_struct *wk) { - struct iwl_trans *trans = container_of(wk, typeof(*trans), - me_recheck_wk.work); + struct iwl_trans_pcie *trans_pcie = container_of(wk, + typeof(*trans_pcie), + me_recheck_wk.work); u32 val; - val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG); - trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP); + val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG); + trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP); } static void iwl_pcie_check_me_status(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; - trans->me_present = -1; + trans_pcie->me_present = -1; - INIT_DELAYED_WORK(&trans->me_recheck_wk, + INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk, iwl_pcie_recheck_me_status); /* we don't have a good way of determining this until BZ */ - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ) return; val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1); if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) { - trans->me_present = + trans_pcie->me_present = !!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT); return; } @@ -1479,38 +1295,28 @@ static void iwl_pcie_check_me_status(struct iwl_trans *trans) val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG); if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN | CSR_HW_IF_CONFIG_REG_IAMT_UP)) { - trans->me_present = 1; + trans_pcie->me_present = 1; return; } /* recheck again later, ME might still be initializing */ - schedule_delayed_work(&trans->me_recheck_wk, HZ); + schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ); } static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - const struct 
iwl_cfg_trans_params *trans; - const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; + const struct iwl_mac_cfg *trans; const struct iwl_dev_info *dev_info; + struct iwl_trans_info info = { + .hw_id = (pdev->device << 16) + pdev->subsystem_device, + }; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int ret; - const struct iwl_cfg *cfg; - trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); + trans = (void *)ent->driver_data; - /* - * This is needed for backwards compatibility with the old - * tables, so we don't need to change all the config structs - * at the same time. The cfg is used to compare with the old - * full cfg structs. - */ - cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); - - /* make sure trans is the first element in iwl_cfg */ - BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); - - iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans); + iwl_trans = iwl_trans_pcie_alloc(pdev, trans, &info); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); @@ -1519,6 +1325,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans_pcie_check_product_reset_status(pdev); iwl_trans_pcie_check_product_reset_mode(pdev); + /* set the things we know so far for the grab NIC access */ + iwl_trans_set_info(iwl_trans, &info); + /* * Let's try to grab NIC access early here. Sometimes, NICs may * fail to initialize, and if that happens it's better if we see @@ -1532,7 +1341,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_trans; if (iwl_trans_grab_nic_access(iwl_trans)) { - get_crf_id(iwl_trans); + get_crf_id(iwl_trans, &info); /* all good */ iwl_trans_release_nic_access(iwl_trans); } else { @@ -1541,38 +1350,32 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); /* * The RF_ID is set to zero in blank OTP so read version to * extract the RF_ID. * This is relevant only for family 9000 and up. 
*/ - if (iwl_trans->trans_cfg->rf_id && - iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) { + if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && + !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) { ret = -EINVAL; goto out_free_trans; } IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", pdev->device, pdev->subsystem_device, - iwl_trans->hw_rev, iwl_trans->hw_rf_id); + info.hw_rev, info.hw_rf_id); dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, - CSR_HW_REV_TYPE(iwl_trans->hw_rev), - iwl_trans->hw_rev_step, - CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), - CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), - CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), + CSR_HW_RFID_TYPE(info.hw_rf_id), + CSR_HW_RFID_IS_CDB(info.hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), - IWL_SUBDEVICE_NO_160(pdev->subsystem_device), - IWL_SUBDEVICE_CORES(pdev->subsystem_device), - CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); + IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), + CSR_HW_RFID_STEP(info.hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; - iwl_trans->name = dev_info->name; - iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; + info.name = dev_info->name; } #if IS_ENABLED(CONFIG_IWLMVM) @@ -1583,82 +1386,41 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * all the parameters that the transport uses must, until that is * changed, be identical to the ones in the 7265D configuration. */ - if (cfg == &iwl7265_2ac_cfg) - cfg_7265d = &iwl7265d_2ac_cfg; - else if (cfg == &iwl7265_2n_cfg) - cfg_7265d = &iwl7265d_2n_cfg; - else if (cfg == &iwl7265_n_cfg) - cfg_7265d = &iwl7265d_n_cfg; - if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) - iwl_trans->cfg = cfg_7265d; - - /* - * This is a hack to switch from Qu B0 to Qu C0. We need to - * do this for all cfgs that use Qu B0, except for those using - * Jf, which have already been moved to the new table. The - * rest must be removed once we convert Qu with Hr as well. - */ - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { - if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; - else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; - } - - /* same thing for QuZ... */ - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { - if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) - iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; - else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; - else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) - iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; - } - + if (iwl_trans->cfg == &iwl7265_cfg && + (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) + iwl_trans->cfg = &iwl7265d_cfg; #endif - /* - * If we didn't set the cfg yet, the PCI ID table entry should have - * been a full config - if yes, use it, otherwise fail. 
- */ if (!iwl_trans->cfg) { - if (ent->driver_data & TRANS_CFG_MARKER) { - pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", - pdev->device, pdev->subsystem_device, - iwl_trans->hw_rev, iwl_trans->hw_rf_id); - ret = -EINVAL; - goto out_free_trans; - } - iwl_trans->cfg = cfg; + pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + info.hw_rev, info.hw_rf_id); + ret = -EINVAL; + goto out_free_trans; } - /* if we don't have a name yet, copy name from the old cfg */ - if (!iwl_trans->name) - iwl_trans->name = iwl_trans->cfg->name; + IWL_INFO(iwl_trans, "Detected %s\n", info.name); - IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name); - - if (iwl_trans->trans_cfg->mq_rx_supported) { + if (iwl_trans->mac_cfg->mq_rx_supported) { if (WARN_ON(!iwl_trans->cfg->num_rbds)) { ret = -EINVAL; goto out_free_trans; } - trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds; + trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans); } else { trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } - if (!iwl_trans->trans_cfg->integrated) { + if (!iwl_trans->mac_cfg->integrated) { u16 link_status; pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status); - iwl_trans->pcie_link_speed = + info.pcie_link_speed = u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS); } + iwl_trans_set_info(iwl_trans, &info); + ret = iwl_trans_init(iwl_trans); if (ret) goto out_free_trans; @@ -1690,11 +1452,12 @@ out_free_trans: static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans) return; - cancel_delayed_work_sync(&trans->me_recheck_wk); + cancel_delayed_work_sync(&trans_pcie->me_recheck_wk); iwl_drv_stop(trans->drv); @@ -1738,27 +1501,11 @@ static int _iwl_pci_resume(struct device *device, bool restore) * Scratch value was altered, this means the device was powered off, we * need to reset it completely. * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, - * but not bits [15:8]. So if we have bits set in lower word, assume - * the device is alive. - * For older devices, just try silently to grab the NIC. + * so assume that any bits there mean that the device is usable. */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { - if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) & - CSR_FUNC_SCRATCH_POWER_OFF_MASK)) - device_was_powered_off = true; - } else { - /* - * bh are re-enabled by iwl_trans_pcie_release_nic_access, - * so re-enable them if _iwl_trans_pcie_grab_nic_access fails. 
- */ - local_bh_disable(); - if (_iwl_trans_pcie_grab_nic_access(trans, true)) { - iwl_trans_pcie_release_nic_access(trans); - } else { - device_was_powered_off = true; - local_bh_enable(); - } - } + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && + !iwl_read32(trans, CSR_FUNC_SCRATCH)) + device_was_powered_off = true; if (restore || device_was_powered_off) { trans->state = IWL_TRANS_NO_FW; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 114a9195ad7f..3b7c12fc4f9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -194,7 +194,7 @@ struct iwl_rb_allocator { static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans, struct iwl_rxq *rxq) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { __le16 *rb_stts = rxq->rb_stts; return le16_to_cpu(READ_ONCE(*rb_stts)); @@ -269,6 +269,7 @@ enum iwl_pcie_fw_reset_state { FW_RESET_REQUESTED, FW_RESET_OK, FW_RESET_ERROR, + FW_RESET_TOP_REQUESTED, }; /** @@ -288,22 +289,14 @@ enum iwl_pcie_imr_status { /** * struct iwl_pcie_txqs - TX queues data * - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) - * @page_offs: offset from skb->cb to mac header page pointer - * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer * @queue_used: bit mask of used queues * @queue_stopped: bit mask of stopped queues * @txq: array of TXQ data structures representing the TXQs * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler - * @queue_alloc_cmd_ver: queue allocation command version * @bc_pool: bytecount DMA allocations pool * @bc_tbl_size: bytecount table size * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO * (and similar usage) - * @cmd: command queue data - * @cmd.fifo: FIFO number - * @cmd.q_id: queue ID - * @cmd.wdg_timeout: watchdog timeout * @tfd: TFD data * @tfd.max_tbs: max number of buffers per TFD * @tfd.size: TFD size @@ -315,26 +308,15 @@ struct iwl_pcie_txqs { struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; struct dma_pool *bc_pool; size_t bc_tbl_size; - bool bc_table_dword; - u8 page_offs; - u8 dev_cmd_offs; struct iwl_tso_hdr_page __percpu *tso_hdr_page; struct { - u8 fifo; - u8 q_id; - unsigned int wdg_timeout; - } cmd; - - struct { u8 max_tbs; u16 size; u8 addr_size; } tfd; struct iwl_dma_ptr scd_bc_tbls; - - u8 queue_alloc_cmd_ver; }; /** @@ -344,7 +326,7 @@ struct iwl_pcie_txqs { * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init - * @ctxt_info_gen3: context information for gen3 devices + * @ctxt_info_v2: context information for v1 devices * @prph_info: prph info for self init * @prph_scratch: prph scratch for self init * @ctxt_info_dma_addr: dma addr of context information @@ -352,6 +334,7 @@ struct iwl_pcie_txqs { * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @iml: image loader image virtual address + * @iml_len: image loader image size * @iml_dma_addr: image loader image DMA address * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -363,9 +346,6 @@ struct iwl_pcie_txqs { * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. 
* @ucode_write_waitq: wait queue for uCode load - * @cmd_queue - command queue number - * @rx_buf_size: Rx buffer size - * @scd_set_active: should the transport configure the SCD for HCMD queue * @rx_page_order: page order for receive buffer size * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access @@ -406,19 +386,20 @@ struct iwl_pcie_txqs { * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already * @opmode_down: indicates opmode went away * @num_rx_bufs: number of RX buffers to allocate/use - * @no_reclaim_cmds: special commands not using reclaim flow - * (firmware workaround) - * @n_no_reclaim_cmds: number of special commands not using reclaim flow * @affinity_mask: IRQ affinity mask for each RX queue * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio * enable/disable - * @fw_reset_handshake: indicates FW reset handshake is needed * @fw_reset_state: state of FW reset handshake * @fw_reset_waitq: waitqueue for FW reset handshake * @is_down: indicates the NIC is down * @isr_stats: interrupt statistics * @napi_dev: (fake) netdev for NAPI registration * @txqs: transport tx queues data. + * @me_present: WiAMT/CSME is detected as present (1), not present (0) + * or unknown (-1, so can still use it as a boolean safely) + * @me_recheck_wk: worker to recheck WiAMT/CSME presence + * @invalid_tx_cmd: invalid TX command buffer + * @wait_command_queue: wait queue for sync commands */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -427,11 +408,12 @@ struct iwl_trans_pcie { struct iwl_rb_allocator rba; union { struct iwl_context_info *ctxt_info; - struct iwl_context_info_gen3 *ctxt_info_gen3; + struct iwl_context_info_v2 *ctxt_info_v2; }; struct iwl_prph_info *prph_info; struct iwl_prph_scratch *prph_scratch; void *iml; + size_t iml_len; dma_addr_t ctxt_info_dma_addr; dma_addr_t prph_info_dma_addr; dma_addr_t prph_scratch_dma_addr; @@ -470,12 +452,8 @@ struct iwl_trans_pcie { wait_queue_head_t ucode_write_waitq; wait_queue_head_t sx_waitq; - u8 n_no_reclaim_cmds; - u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u16 num_rx_bufs; - enum iwl_amsdu_size rx_buf_size; - bool scd_set_active; bool pcie_dbg_dumped_once; u32 rx_page_order; u32 rx_buf_bytes; @@ -510,7 +488,6 @@ struct iwl_trans_pcie { void *base_rb_stts; dma_addr_t base_rb_stts_dma; - bool fw_reset_handshake; enum iwl_pcie_fw_reset_state fw_reset_state; wait_queue_head_t fw_reset_waitq; enum iwl_pcie_imr_status imr_status; @@ -518,6 +495,13 @@ struct iwl_trans_pcie { char rf_name[32]; struct iwl_pcie_txqs txqs; + + s8 me_present; + struct delayed_work me_recheck_wk; + + struct iwl_dma_ptr invalid_tx_cmd; + + wait_queue_head_t wait_command_queue; }; static inline struct iwl_trans_pcie * @@ -552,8 +536,8 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) */ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg_trans_params *cfg_trans); + const struct iwl_mac_cfg *mac_cfg, + struct iwl_trans_info *info); void iwl_trans_pcie_free(struct iwl_trans *trans); void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, struct device *dev); @@ -623,7 +607,7 @@ struct iwl_tso_page_info { IWL_TSO_PAGE_DATA_SIZE)) int iwl_pcie_tx_init(struct iwl_trans *trans); -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); +void iwl_pcie_tx_start(struct iwl_trans *trans); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); bool iwl_trans_pcie_txq_enable(struct 
iwl_trans *trans, int queue, u16 ssn, @@ -679,7 +663,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) idx = iwl_txq_get_cmd_index(txq, idx); return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; @@ -718,7 +702,7 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) { return ++index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + (trans->mac_cfg->base->max_tfd_queue_size - 1); } /** @@ -729,7 +713,7 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) { return --index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + (trans->mac_cfg->base->max_tfd_queue_size - 1); } void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); @@ -752,10 +736,12 @@ int iwl_txq_gen2_set_tb(struct iwl_trans *trans, static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + tfd->num_tbs = 0; - iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); + iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma, + trans_pcie->invalid_tx_cmd.size); } void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, @@ -782,7 +768,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, struct iwl_tfd *tfd; struct iwl_tfd_tb *tb; - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { struct iwl_tfh_tfd *tfh_tfd = _tfd; struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; @@ -940,11 +926,13 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } -static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) +static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans, + bool top_reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n"); + IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n", + top_reset ? "RESET" : "ALIVE"); if (!trans_pcie->msix_enabled) { /* @@ -954,11 +942,20 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) * RX interrupt which will allow us to receive the ALIVE * notification (which is Rx) and continue the flow. */ - trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; + if (top_reset) + trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE; + else + trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | + CSR_INT_BIT_FH_RX; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { - iwl_enable_hw_int_msk_msix(trans, - MSIX_HW_INT_CAUSES_REG_ALIVE); + u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE + : MSIX_HW_INT_CAUSES_REG_ALIVE; + + iwl_enable_hw_int_msk_msix(trans, val); + + if (top_reset) + return; /* * Leave all the FH causes enabled to get the ALIVE * notification. 
@@ -1005,7 +1002,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) MSIX_HW_INT_CAUSES_REG_RF_KILL); } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { /* * On 9000-series devices this bit isn't enabled by default, so * when we power down the device we need set the bit to allow it @@ -1076,8 +1073,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common trans ops for all generations transports */ -void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg); +void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans); int iwl_trans_pcie_start_hw(struct iwl_trans *trans); void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans); void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val); @@ -1107,11 +1103,14 @@ int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); void __releases(nic_access_nobh) iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); /* transport gen 1 exported functions */ -void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans); int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill); + const struct iwl_fw *fw, + const struct fw_img *img, + bool run_in_rfkill); void iwl_trans_pcie_stop_device(struct iwl_trans *trans); /* common functions that are used by gen2 transport */ @@ -1129,15 +1128,12 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_apply_destination(struct iwl_trans *trans); -/* common functions that are used by gen3 transport */ -void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); - /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill); + const struct iwl_fw *fw, + const struct fw_img *img, + bool run_in_rfkill); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans); -int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 337324eea1a1..f0405eddc367 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -12,7 +12,8 @@ #include "iwl-io.h" #include "internal.h" #include "iwl-op-mode.h" -#include "iwl-context-info-gen3.h" +#include "iwl-context-info-v2.h" +#include "fw/dbg.h" /****************************************************************************** * @@ -143,12 +144,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { /* TODO: remove this once fw does it */ - iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); - return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, + iwl_write_umac_prph(trans, 
RFH_RXF_DMA_CFG_AX210, 0); + return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); - } else if (trans->trans_cfg->mq_rx_supported) { + } else if (trans->mac_cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); @@ -175,7 +176,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, * 1. shadow registers aren't enabled * 2. there is a chance that the NIC is asleep */ - if (!trans->trans_cfg->base_params->shadow_reg_enable && + if (!trans->mac_cfg->base->shadow_reg_enable && test_bit(STATUS_TPOWER_PMI, &trans->status)) { reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); @@ -190,9 +191,9 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - if (!trans->trans_cfg->mq_rx_supported) + if (!trans->mac_cfg->mq_rx_supported) iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | HBUS_TARG_WRPTR_RX_Q(rxq->id)); else @@ -205,7 +206,7 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; if (!rxq->need_update) @@ -221,7 +222,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_rx_transfer_desc *bd = rxq->bd; BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); @@ -348,7 +349,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { - if (trans->trans_cfg->mq_rx_supported) + if (trans->mac_cfg->mq_rx_supported) iwl_pcie_rxmq_restock(trans, rxq); else iwl_pcie_rxsq_restock(trans, rxq); @@ -362,8 +363,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, u32 *offset, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; + unsigned int rbsize = trans_pcie->rx_buf_bytes; struct page *page; gfp_t gfp_mask = priority; @@ -657,19 +658,19 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data) static int iwl_pcie_free_bd_size(struct iwl_trans *trans) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return sizeof(struct iwl_rx_transfer_desc); - return trans->trans_cfg->mq_rx_supported ? + return trans->mac_cfg->mq_rx_supported ? 
sizeof(__le64) : sizeof(__le32); } static int iwl_pcie_used_bd_size(struct iwl_trans *trans) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) return sizeof(struct iwl_rx_completion_desc_bz); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return sizeof(struct iwl_rx_completion_desc); return sizeof(__le32); @@ -701,7 +702,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans) { - bool use_rx_td = (trans->trans_cfg->device_family >= + bool use_rx_td = (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210); if (use_rx_td) @@ -720,8 +721,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, int free_size; spin_lock_init(&rxq->lock); - if (trans->trans_cfg->mq_rx_supported) - rxq->queue_size = trans->cfg->num_rbds; + if (trans->mac_cfg->mq_rx_supported) + rxq->queue_size = iwl_trans_get_num_rbds(trans); else rxq->queue_size = RX_QUEUE_SIZE; @@ -736,7 +737,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, if (!rxq->bd) goto err; - if (trans->trans_cfg->mq_rx_supported) { + if (trans->mac_cfg->mq_rx_supported) { rxq->used_bd = dma_alloc_coherent(dev, iwl_pcie_used_bd_size(trans) * rxq->queue_size, @@ -753,7 +754,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, return 0; err: - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); @@ -772,7 +773,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) if (WARN_ON(trans_pcie->rxq)) return -EINVAL; - trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq), GFP_KERNEL); trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), sizeof(trans_pcie->rx_pool[0]), @@ -795,7 +796,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) */ trans_pcie->base_rb_stts = dma_alloc_coherent(trans->dev, - rb_stts_size * trans->num_rx_queues, + rb_stts_size * trans->info.num_rxqs, &trans_pcie->base_rb_stts_dma, GFP_KERNEL); if (!trans_pcie->base_rb_stts) { @@ -803,7 +804,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) goto err; } - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; rxq->id = i; @@ -816,7 +817,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) err: if (trans_pcie->base_rb_stts) { dma_free_coherent(trans->dev, - rb_stts_size * trans->num_rx_queues, + rb_stts_size * trans->info.num_rxqs, trans_pcie->base_rb_stts, trans_pcie->base_rb_stts_dma); trans_pcie->base_rb_stts = NULL; @@ -834,11 +835,10 @@ err: static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - switch (trans_pcie->rx_buf_size) { + switch (trans->conf.rx_buf_size) { case IWL_AMSDU_4K: rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; break; @@ -906,7 +906,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) u32 rb_size, enabled = 0; int i; - switch (trans_pcie->rx_buf_size) { + switch (trans->conf.rx_buf_size) { case IWL_AMSDU_2K: rb_size = RFH_RXF_DMA_RB_SIZE_2K; break; @@ -932,7 +932,7 @@ static void iwl_pcie_rx_mq_hw_init(struct 
iwl_trans *trans) /* disable free amd used rx queue operation */ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { /* Tell device where to find RBD free table in DRAM */ iwl_write_prph64_no_grab(trans, RFH_Q_FRBDCB_BA_LSB(i), @@ -976,7 +976,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | RFH_GEN_CFG_SERVICE_DMA_SNOOP | RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, - trans->trans_cfg->integrated ? + trans->mac_cfg->integrated ? RFH_GEN_CFG_RB_CHUNK_SIZE_64 : RFH_GEN_CFG_RB_CHUNK_SIZE_128)); /* Enable the relevant rx queues */ @@ -1072,7 +1072,7 @@ void iwl_pcie_rx_napi_sync(struct iwl_trans *trans) if (unlikely(!trans_pcie->rxq)) return; - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; if (rxq && rxq->napi.poll) @@ -1109,7 +1109,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) for (i = 0; i < RX_QUEUE_SIZE; i++) def_rxq->queue[i] = NULL; - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; spin_lock_bh(&rxq->lock); @@ -1122,7 +1122,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) rxq->write = 0; rxq->write_actual = 0; memset(rxq->rb_stts, 0, - (trans->trans_cfg->device_family >= + (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ? sizeof(__le16) : sizeof(struct iwl_rb_status)); @@ -1144,9 +1144,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) } /* move the pool to the default queue and allocator ownerships */ - queue_size = trans->trans_cfg->mq_rx_supported ? + queue_size = trans->mac_cfg->mq_rx_supported ? 
trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; - allocator_pool_size = trans->num_rx_queues * + allocator_pool_size = trans->info.num_rxqs * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); num_alloc = queue_size + allocator_pool_size; @@ -1175,7 +1175,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (ret) return ret; - if (trans->trans_cfg->mq_rx_supported) + if (trans->mac_cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); @@ -1223,14 +1223,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) if (trans_pcie->base_rb_stts) { dma_free_coherent(trans->dev, - rb_stts_size * trans->num_rx_queues, + rb_stts_size * trans->info.num_rxqs, trans_pcie->base_rb_stts, trans_pcie->base_rb_stts_dma); trans_pcie->base_rb_stts = NULL; trans_pcie->base_rb_stts_dma = 0; } - for (i = 0; i < trans->num_rx_queues; i++) { + for (i = 0; i < trans->info.num_rxqs; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); @@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; bool page_stolen = false; int max_len = trans_pcie->rx_buf_bytes; u32 offset = 0; @@ -1368,8 +1368,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, if (reclaim && !pkt->hdr.group_id) { int i; - for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { - if (trans_pcie->no_reclaim_cmds[i] == + for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) { + if (trans->conf.no_reclaim_cmds[i] == pkt->hdr.cmd) { reclaim = false; break; @@ -1408,7 +1408,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } page_stolen |= rxcb._page_stolen; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) break; } @@ -1454,18 +1454,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); - if (!trans->trans_cfg->mq_rx_supported) { + if (!trans->mac_cfg->mq_rx_supported) { rxb = rxq->queue[i]; rxq->queue[i] = NULL; return rxb; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; vid = le16_to_cpu(cd[i].rbid); *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; - } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_rx_completion_desc *cd = rxq->used_bd; vid = le16_to_cpu(cd[i].rbid); @@ -1648,7 +1648,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); - if (WARN_ON(entry->entry >= trans->num_rx_queues)) + if (WARN_ON(entry->entry >= trans->info.num_rxqs)) return IRQ_NONE; if (!trans_pcie->rxq) { @@ -1683,18 +1683,18 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && - !trans->cfg->apmg_not_supported && + !trans->mac_cfg->base->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) { 
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); iwl_op_mode_wimax_active(trans->op_mode); - wake_up(&trans->wait_command_queue); + wake_up(&trans_pcie->wait_command_queue); return; } - for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) { if (!trans_pcie->txqs.txq[i]) continue; timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer); @@ -1705,7 +1705,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - wake_up(&trans->wait_command_queue); + wake_up(&trans_pcie->wait_command_queue); } static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) @@ -1820,7 +1820,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) &trans->status)) IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n"); - wake_up(&trans->wait_command_queue); + wake_up(&trans_pcie->wait_command_queue); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) @@ -1828,6 +1828,54 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) } } +static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 state; + + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) { + u32 val = iwl_read32(trans, CSR_IPC_STATE); + + state = u32_get_bits(val, CSR_IPC_STATE_RESET); + IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state); + } else { + state = CSR_IPC_STATE_RESET_SW_READY; + } + + switch (state) { + case CSR_IPC_STATE_RESET_SW_READY: + if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { + IWL_DEBUG_ISR(trans, "Reset flow completed\n"); + trans_pcie->fw_reset_state = FW_RESET_OK; + wake_up(&trans_pcie->fw_reset_waitq); + break; + } + fallthrough; + case CSR_IPC_STATE_RESET_TOP_READY: + /* FIXME: handle this case when requesting TOP reset */ + fallthrough; + case CSR_IPC_STATE_RESET_NONE: + IWL_FW_CHECK_FAILED(trans, + "Invalid reset interrupt (state=%d)!\n", + state); + break; + case CSR_IPC_STATE_RESET_TOP_FOLLOWER: + if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { + /* if we were in reset, wake that up */ + IWL_INFO(trans, + "TOP reset from BT while doing reset\n"); + trans_pcie->fw_reset_state = FW_RESET_OK; + wake_up(&trans_pcie->fw_reset_waitq); + } else { + IWL_INFO(trans, "TOP reset from BT\n"); + trans->state = IWL_TRANS_NO_FW; + iwl_trans_schedule_reset(trans, + IWL_ERR_TYPE_TOP_RESET_BY_BT); + } + break; + } +} + irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) { struct iwl_trans *trans = dev_id; @@ -1936,7 +1984,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { /* * We can restock, since firmware configured * the RFH @@ -1948,10 +1996,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) } if (inta & CSR_INT_BIT_RESET_DONE) { - IWL_DEBUG_ISR(trans, "Reset flow completed\n"); - trans_pcie->fw_reset_state = FW_RESET_OK; + iwl_trans_pcie_handle_reset_interrupt(trans); handled |= CSR_INT_BIT_RESET_DONE; - wake_up(&trans_pcie->fw_reset_waitq); } /* Safely ignore these bits for debug checks below */ @@ -2086,7 +2132,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) iwl_enable_rfkill_int(trans); /* Re-enable the ALIVE / Rx interrupt if it occurred */ else if 
(handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) - iwl_enable_fw_load_int_ctx_info(trans); + iwl_enable_fw_load_int_ctx_info(trans, false); spin_unlock_bh(&trans_pcie->irq_lock); } @@ -2301,7 +2347,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; else sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; @@ -2309,9 +2355,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) { IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n", inta_hw); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - iwl_trans_pcie_reset(trans, - IWL_RESET_MODE_PROD_RESET); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + trans->request_top_reset = 1; + iwl_op_mode_nic_error(trans->op_mode, + IWL_ERR_TYPE_TOP_FATAL_ERROR); + iwl_trans_schedule_reset(trans, + IWL_ERR_TYPE_TOP_FATAL_ERROR); + } } /* Error detected by uCode */ @@ -2350,7 +2400,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { /* We can restock, since firmware configured the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } @@ -2400,11 +2450,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) iwl_pcie_irq_handle_error(trans); } - if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { - IWL_DEBUG_ISR(trans, "Reset flow completed\n"); - trans_pcie->fw_reset_state = FW_RESET_OK; - wake_up(&trans_pcie->fw_reset_waitq); - } + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) + iwl_trans_pcie_handle_reset_interrupt(trans); if (!polling) iwl_pcie_clear_irq(trans, entry->entry); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 08409e24aed6..38ad719161e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -6,7 +6,7 @@ #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" -#include "iwl-context-info-gen3.h" +#include "iwl-context-info-v2.h" #include "internal.h" #include "fw/dbg.h" @@ -81,13 +81,13 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); - iwl_trans_sw_reset(trans, false); + iwl_trans_pcie_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_INIT); else @@ -102,10 +102,10 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) trans_pcie->fw_reset_state = FW_RESET_REQUESTED; - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); - else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) + else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); else @@ -157,7 +157,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) return; if (trans->state >= IWL_TRANS_FW_STARTED && - trans_pcie->fw_reset_handshake) { + trans->conf.fw_reset_handshake) { /* * Reset handshake can dump firmware on timeout, but that * should assume that the firmware is already dead. @@ -191,8 +191,8 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) } iwl_pcie_ctxt_info_free_paging(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - iwl_pcie_ctxt_info_gen3_free(trans, false); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_pcie_ctxt_info_v2_free(trans, false); else iwl_pcie_ctxt_info_free(trans); @@ -200,7 +200,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) iwl_pcie_gen2_apm_stop(trans, false); /* re-take ownership to prevent other users from stealing the device */ - iwl_trans_sw_reset(trans, true); + iwl_trans_pcie_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't @@ -253,7 +253,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, - trans->cfg->min_txq_size); + trans->mac_cfg->base->min_txq_size); int ret; /* TODO: most of the logic can be removed in A0 - but not in Z0 */ @@ -270,7 +270,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size)) + if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ @@ -291,7 +291,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) if (buf[0]) return; - switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF): pos = scnprintf(buf, buflen, "JF"); break; @@ -315,7 +315,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP): if (SILICON_Z_STEP == - CSR_HW_RFID_STEP(trans->hw_rf_id)) + CSR_HW_RFID_STEP(trans->info.hw_rf_id)) pos = scnprintf(buf, buflen, "WHTC"); else pos = scnprintf(buf, buflen, "WH"); @@ -324,7 +324,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) return; } - switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): @@ -347,7 +347,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) } pos += scnprintf(buf + pos, 
buflen - pos, ", rfid=0x%x", - trans->hw_rf_id); + trans->info.hw_rf_id); IWL_INFO(trans, "Detected RF %s\n", buf); @@ -374,8 +374,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans) /* now that we got alive we can free the fw image & the context info. * paging memory cannot be freed included since FW will still use it */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - iwl_pcie_ctxt_info_gen3_free(trans, true); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_pcie_ctxt_info_v2_free(trans, true); else iwl_pcie_ctxt_info_free(trans); @@ -390,7 +390,7 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans) iwl_pcie_get_rf_name(trans); mutex_unlock(&trans_pcie->mutex); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) trans->step_urm = !!(iwl_read_umac_prph(trans, CNVI_PMU_STEP_FLOW) & CNVI_PMU_STEP_FLOW_FORCE_URM); @@ -413,21 +413,21 @@ static bool iwl_pcie_set_ltr(struct iwl_trans *trans) * initialize the LTR to ~250 usec (see ltr_val above). * The firmware initializes this again later (to a smaller value). */ - if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && - !trans->trans_cfg->integrated) { + if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || + trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) && + !trans->mac_cfg->integrated) { iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); return true; } - if (trans->trans_cfg->integrated && - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + if (trans->mac_cfg->integrated && + trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) { iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); return true; } - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { /* First clear the interrupt, just in case */ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, MSIX_HW_INT_CAUSES_REG_IML); @@ -484,16 +484,22 @@ static void iwl_pcie_spin_for_iml(struct iwl_trans *trans) } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill) + const struct iwl_fw *fw, + const struct fw_img *img, + bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill, keep_ram_busy; + bool top_reset_done = false; int ret; + mutex_lock(&trans_pcie->mutex); +again: /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - return -EIO; + ret = -EIO; + goto out; } iwl_enable_rfkill_int(trans); @@ -510,8 +516,6 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); - mutex_lock(&trans_pcie->mutex); - /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { @@ -541,22 +545,37 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); - else - ret = iwl_pcie_ctxt_info_init(trans, fw); - if (ret) - goto out; + if (WARN_ON(trans->do_top_reset && + trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) + return -EINVAL; + + /* we need to wait later - set state */ + if 
(trans->do_top_reset) + trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED; + + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (!top_reset_done) { + ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img); + if (ret) + goto out; + } + + iwl_pcie_ctxt_info_v2_kick(trans); + } else { + ret = iwl_pcie_ctxt_info_init(trans, img); + if (ret) + goto out; + } keep_ram_busy = !iwl_pcie_set_ltr(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n", iwl_read32(trans, CSR_FUNC_SCRATCH)); iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); - } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); } else { iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); @@ -565,6 +584,38 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, if (keep_ram_busy) iwl_pcie_spin_for_iml(trans); + if (trans->do_top_reset) { + trans->do_top_reset = 0; + +#define FW_TOP_RESET_TIMEOUT (HZ / 4) + ret = wait_event_timeout(trans_pcie->fw_reset_waitq, + trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED, + FW_TOP_RESET_TIMEOUT); + + if (trans_pcie->fw_reset_state != FW_RESET_OK) { + if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED) + IWL_ERR(trans, + "TOP reset interrupted by error (state %d)!\n", + trans_pcie->fw_reset_state); + else + IWL_ERR(trans, "TOP reset timed out!\n"); + iwl_op_mode_nic_error(trans->op_mode, + IWL_ERR_TYPE_TOP_RESET_FAILED); + iwl_trans_schedule_reset(trans, + IWL_ERR_TYPE_TOP_RESET_FAILED); + ret = -EIO; + goto out; + } + + msleep(10); + IWL_INFO(trans, "TOP reset successful, reinit now\n"); + /* now load the firmware again properly */ + trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &= + ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET); + top_reset_done = true; + goto again; + } + /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 4cc7a2e5746d..cc4d289b110d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -28,7 +28,7 @@ #include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" -#include "iwl-context-info-gen3.h" +#include "iwl-context-info-v2.h" /* extended range in FW SRAM */ #define IWL_FW_MEM_EXTENDED_START 0x40000 @@ -131,7 +131,7 @@ out: int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_SW_RESET); usleep_range(10000, 20000); @@ -237,7 +237,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { - if (trans->cfg->apmg_not_supported) + if (trans->mac_cfg->base->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) @@ -293,7 +293,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) */ /* Disable L0S 
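
The new TOP-reset path in iwl_trans_pcie_gen2_start_fw() above requests the reset, waits on fw_reset_state with a timeout, and then reruns the whole bring-up exactly once through the top_reset_done flag and the again: label. Here is a compact single-threaded model of that control flow; every name below is invented, the asynchronous wait is reduced to a stub, and the error leg only hints at the driver's escalation to a scheduled reset.

#include <stdbool.h>
#include <stdio.h>

enum reset_state { RESET_REQUESTED, RESET_OK, RESET_FAILED };

/* Stand-in for wait_event_timeout() on the IRQ-driven fw_reset_state. */
static enum reset_state wait_for_top_reset(void)
{
        return RESET_OK;
}

static int load_firmware(bool after_top_reset)
{
        printf("loading firmware (%s TOP reset)\n",
               after_top_reset ? "after" : "before");
        return 0;
}

static int start_fw(bool do_top_reset)
{
        bool top_reset_done = false;
        int ret;

again:
        ret = load_firmware(top_reset_done);
        if (ret)
                return ret;

        if (do_top_reset && !top_reset_done) {
                if (wait_for_top_reset() != RESET_OK)
                        return -1;      /* driver reports the error and schedules a reset */
                top_reset_done = true;  /* only ever retry once */
                goto again;
        }
        return 0;
}

int main(void)
{
        return start_fw(true);
}
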
exit timer (platform NMI Work/Around) */ - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); @@ -317,7 +317,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ - if (trans->trans_cfg->base_params->pll_cfg) + if (trans->mac_cfg->base->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); ret = iwl_finish_nic_init(trans); @@ -353,7 +353,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. */ - if (!trans->cfg->apmg_not_supported) { + if (!trans->mac_cfg->base->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); @@ -469,7 +469,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) /* stop device's busmaster DMA activity */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); @@ -501,10 +501,10 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) iwl_pcie_apm_init(trans); /* inform ME that we are leaving */ - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->trans_cfg->device_family >= + else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -565,7 +565,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) return -ENOMEM; } - if (trans->trans_cfg->base_params->shadow_reg_enable) { + if (trans->mac_cfg->base->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); @@ -634,7 +634,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) IWL_DEBUG_INFO(trans, "Couldn't prepare the card but SAP is connected\n"); trans->csme_own = true; - if (trans->trans_cfg->device_family != + if (trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000) IWL_ERR(trans, "SAP not supported for this NIC family\n"); @@ -819,7 +819,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, iwl_enable_interrupts(trans); - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); @@ -977,7 +977,7 @@ monitor: if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), fw_mon->physical >> dest->base_shift); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (fw_mon->physical + fw_mon->size - 256) >> dest->end_shift); @@ -1153,7 +1153,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) */ iwl_pcie_map_list(trans, causes_list_common, ARRAY_SIZE(causes_list_common), val); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_pcie_map_list(trans, causes_list_bz, 
ARRAY_SIZE(causes_list_bz), val); else @@ -1175,7 +1175,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) * the other (N - 2) interrupt vectors. */ val = BIT(MSIX_FH_INT_CAUSES_Q(0)); - for (idx = 1; idx < trans->num_rx_queues; idx++) { + for (idx = 1; idx < trans->info.num_rxqs; idx++) { iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), MSIX_FH_INT_CAUSES_Q(idx - offset)); val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); @@ -1196,7 +1196,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { - if (trans->trans_cfg->mq_rx_supported && + if (trans->mac_cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); @@ -1271,7 +1271,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ - if (!trans->cfg->apmg_not_supported) { + if (!trans->mac_cfg->base->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); @@ -1279,7 +1279,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) } /* Make sure (redundant) we've released our request to stay awake */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else @@ -1337,7 +1337,9 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) } int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill) + const struct iwl_fw *fw, + const struct fw_img *img, + bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; @@ -1408,10 +1410,10 @@ int iwl_trans_pcie_start_fw(struct iwl_trans *trans, iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) - ret = iwl_pcie_load_given_ucode_8000(trans, fw); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + ret = iwl_pcie_load_given_ucode_8000(trans, img); else - ret = iwl_pcie_load_given_ucode(trans, fw); + ret = iwl_pcie_load_given_ucode(trans, img); /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); @@ -1423,10 +1425,10 @@ out: return ret; } -void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) { iwl_pcie_reset_ict(trans); - iwl_pcie_tx_start(trans, scd_addr); + iwl_pcie_tx_start(trans); } void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, @@ -1485,7 +1487,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? 
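
The MSI-X RX-cause mapping loop earlier in this hunk now iterates up to trans->info.num_rxqs, programming one IVAR entry per RX queue and OR-ing the matching cause bit into an enable mask. A standalone sketch of just the mask construction; the cause-index macro below is a placeholder, not the real MSIX_FH_INT_CAUSES_Q().

#include <stdio.h>

#define BIT(n)                  (1u << (n))
/* Placeholder cause index for RX queue q. */
#define FH_INT_CAUSES_Q(q)      (q)

static unsigned int rx_causes_mask(unsigned int num_rxqs)
{
        unsigned int val = BIT(FH_INT_CAUSES_Q(0));
        unsigned int idx;

        /* Queue 0 is always enabled; each further RX queue adds its own bit. */
        for (idx = 1; idx < num_rxqs; idx++)
                val |= BIT(FH_INT_CAUSES_Q(idx));

        return val;
}

int main(void)
{
        printf("enable mask for 4 RX queues: 0x%x\n", rx_causes_mask(4));
        return 0;
}
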
"disabled" : "enabled"); if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) && - !WARN_ON(trans->trans_cfg->gen2)) + !WARN_ON(trans->mac_cfg->gen2)) _iwl_trans_pcie_stop_device(trans, from_irq); } @@ -1505,7 +1507,7 @@ static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, iwl_pcie_synchronize_irqs(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -1534,11 +1536,11 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : UREG_DOORBELL_TO_ISR6_RESUME); - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : CSR_IPC_SLEEP_CONTROL_RESUME); @@ -1593,7 +1595,7 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, goto out; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else @@ -1653,17 +1655,18 @@ out: static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans, - const struct iwl_cfg_trans_params *cfg_trans) + const struct iwl_mac_cfg *mac_cfg, + struct iwl_trans_info *info) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; - if (!cfg_trans->mq_rx_supported) + if (!mac_cfg->mq_rx_supported) goto enable_msi; - if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) + if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000) max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); @@ -1693,27 +1696,28 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, * More than two interrupts: we will use fewer RSS queues. 
*/ if (num_irqs <= max_irqs - 2) { - trans_pcie->trans->num_rx_queues = num_irqs + 1; + info->num_rxqs = num_irqs + 1; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS; } else if (num_irqs == max_irqs - 1) { - trans_pcie->trans->num_rx_queues = num_irqs; + info->num_rxqs = num_irqs; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; } else { - trans_pcie->trans->num_rx_queues = num_irqs - 1; + info->num_rxqs = num_irqs - 1; } IWL_DEBUG_INFO(trans, "MSI-X enabled with rx queues %d, vec mask 0x%x\n", - trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); + info->num_rxqs, trans_pcie->shared_vec_mask); - WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); + WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; trans_pcie->msix_enabled = true; return; enable_msi: + info->num_rxqs = 1; ret = pci_enable_msi(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); @@ -1726,14 +1730,15 @@ enable_msi: } } -static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) +static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans, + struct iwl_trans_info *info) { #if defined(CONFIG_SMP) int iter_rx_q, i, ret, cpu, offset; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; - iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; + iter_rx_q = info->num_rxqs - 1 + i; offset = 1 + i; for (; i < iter_rx_q ; i++) { /* @@ -1753,7 +1758,8 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) } static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, - struct iwl_trans_pcie *trans_pcie) + struct iwl_trans_pcie *trans_pcie, + struct iwl_trans_info *info) { int i; @@ -1782,7 +1788,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, return ret; } } - iwl_pcie_irq_set_affinity(trans_pcie->trans); + iwl_pcie_irq_set_affinity(trans_pcie->trans, info); return 0; } @@ -1791,7 +1797,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; - switch (trans->trans_cfg->device_family) { + switch (trans->mac_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; @@ -1860,8 +1866,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) if (err) return err; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && - trans->trans_cfg->integrated) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 && + trans->mac_cfg->integrated) { err = iwl_pcie_gen2_force_power_gating(trans); if (err) return err; @@ -1936,7 +1942,7 @@ u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) { - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return 0x00FFFFFF; else return 0x000FFFFF; @@ -1960,45 +1966,17 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) +void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* free all first - we might be reconfigured for a different size */ iwl_pcie_free_rbs_pool(trans); - trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue; - trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo; - 
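
The vector-to-queue policy just above decides, from the number of MSI-X vectors actually granted, how many RX queues to use and which causes must share a vector, and now records the result in the new iwl_trans_info instead of on the transport. A standalone restatement of that decision; the flag values are illustrative stand-ins for IWL_SHARED_IRQ_*.

#include <stdio.h>

/* Illustrative stand-ins for the IWL_SHARED_IRQ_* flags. */
#define SHARED_IRQ_NON_RX       0x1
#define SHARED_IRQ_FIRST_RSS    0x2

struct vec_plan {
        unsigned int num_rxqs;
        unsigned int shared_vec_mask;
};

/* num_irqs = vectors actually granted, max_irqs = vectors requested. */
static struct vec_plan plan_vectors(unsigned int num_irqs, unsigned int max_irqs)
{
        struct vec_plan p = { 0, 0 };

        if (num_irqs <= max_irqs - 2) {
                /* Two or more short: non-RX causes and the first RSS queue
                 * share vector 0, the remaining vectors carry RX queues. */
                p.num_rxqs = num_irqs + 1;
                p.shared_vec_mask = SHARED_IRQ_NON_RX | SHARED_IRQ_FIRST_RSS;
        } else if (num_irqs == max_irqs - 1) {
                /* One short: only the non-RX causes share a vector. */
                p.num_rxqs = num_irqs;
                p.shared_vec_mask = SHARED_IRQ_NON_RX;
        } else {
                /* Full allocation: one dedicated non-RX vector, rest are RX. */
                p.num_rxqs = num_irqs - 1;
        }
        return p;
}

int main(void)
{
        struct vec_plan p = plan_vectors(6, 8);

        printf("rx queues %u, shared mask 0x%x\n", p.num_rxqs, p.shared_vec_mask);
        return 0;
}
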
trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs; - trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); - trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; - - if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) - trans_pcie->n_no_reclaim_cmds = 0; - else - trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; - if (trans_pcie->n_no_reclaim_cmds) - memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, - trans_pcie->n_no_reclaim_cmds * sizeof(u8)); - - trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; trans_pcie->rx_page_order = - iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); + iwl_trans_get_rb_size_order(trans->conf.rx_buf_size); trans_pcie->rx_buf_bytes = - iwl_trans_get_rb_size(trans_pcie->rx_buf_size); - trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); - - trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword; - trans_pcie->scd_set_active = trans_cfg->scd_set_active; - - trans->command_groups = trans_cfg->command_groups; - trans->command_groups_size = trans_cfg->command_groups_size; - - - trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; + iwl_trans_get_rb_size(trans->conf.rx_buf_size); } void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, @@ -2026,11 +2004,14 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans) { - iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd); } static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_header_wide bad_cmd = { .cmd = INVALID_WR_PTR_CMD, .group_id = DEBUG_GROUP, @@ -2040,11 +2021,11 @@ static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans) }; int ret; - ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd, + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd, sizeof(bad_cmd)); if (ret) return ret; - memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); + memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); return 0; } @@ -2055,7 +2036,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_txq_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); @@ -2348,6 +2329,7 @@ out: void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie_removal *removal; char _msg = 0, *msg = &_msg; @@ -2358,9 +2340,9 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return; - if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) { + if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) { mode = IWL_RESET_MODE_FUNC_RESET; - if (trans->me_present < 0) + if (trans_pcie->me_present < 0) msg = " instead of product reset as ME may be present"; else msg = " instead of product reset as ME is present"; @@ -2395,7 +2377,7 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) removal->pdev = to_pci_dev(trans->dev); removal->mode = 
mode; - removal->integrated = trans->trans_cfg->integrated; + removal->integrated = trans->mac_cfg->integrated; INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); pci_dev_get(removal->pdev); schedule_work(&removal->work); @@ -2423,7 +2405,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) if (trans_pcie->cmd_hold_nic_awake) goto out; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; @@ -2431,7 +2413,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); /* @@ -2518,7 +2500,7 @@ iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) if (trans_pcie->cmd_hold_nic_awake) goto out; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else @@ -2617,7 +2599,7 @@ int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (queue >= trans->num_rx_queues || !trans_pcie->rxq) + if (queue >= trans->info.num_rxqs || !trans_pcie->rxq) return -EINVAL; data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; @@ -2698,10 +2680,10 @@ int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) /* waiting for all the tx frames complete might take a while */ for (cnt = 0; - cnt < trans->trans_cfg->base_params->num_of_queues; + cnt < trans->mac_cfg->base->num_of_queues; cnt++) { - if (cnt == trans_pcie->txqs.cmd.q_id) + if (cnt == trans->conf.cmd_queue) continue; if (!test_bit(cnt, trans_pcie->txqs.queue_used)) continue; @@ -2842,7 +2824,7 @@ static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state; - if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) + if (*pos >= priv->trans->mac_cfg->base->num_of_queues) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); @@ -2860,7 +2842,7 @@ static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, *pos = ++state->pos; - if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) + if (*pos >= priv->trans->mac_cfg->base->num_of_queues) return NULL; return state; @@ -2892,7 +2874,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) else seq_puts(seq, "(unallocated)"); - if (state->pos == trans_pcie->txqs.cmd.q_id) + if (state->pos == trans->conf.cmd_queue) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); @@ -2930,7 +2912,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, int pos = 0, i, ret; size_t bufsz; - bufsz = sizeof(char) * 121 * trans->num_rx_queues; + bufsz = sizeof(char) * 121 * trans->info.num_rxqs; if (!trans_pcie->rxq) return -EAGAIN; @@ -2939,7 +2921,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, if (!buf) return -ENOMEM; - for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { + for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; spin_lock_bh(&rxq->lock); @@ -3269,8 +3251,9 @@ static 
ssize_t iwl_dbgfs_reset_write(struct file *file, { struct iwl_trans *trans = file->private_data; static const char * const modes[] = { - [IWL_RESET_MODE_SW_RESET] = "n/a", - [IWL_RESET_MODE_REPROBE] = "n/a", + [IWL_RESET_MODE_SW_RESET] = "sw", + [IWL_RESET_MODE_REPROBE] = "reprobe", + [IWL_RESET_MODE_TOP_RESET] = "top", [IWL_RESET_MODE_REMOVE_ONLY] = "remove", [IWL_RESET_MODE_RESCAN] = "rescan", [IWL_RESET_MODE_FUNC_RESET] = "function", @@ -3289,8 +3272,18 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file, if (mode < 0) return mode; - if (mode < IWL_RESET_MODE_REMOVE_ONLY) - return -EINVAL; + if (mode < IWL_RESET_MODE_REMOVE_ONLY) { + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return -EINVAL; + if (mode == IWL_RESET_MODE_TOP_RESET) { + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) + return -EINVAL; + trans->request_top_reset = 1; + } + iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS); + iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS); + return count; + } iwl_trans_pcie_reset(trans, mode); @@ -3431,7 +3424,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, (*data)->len = cpu_to_le32(fh_regs_len); val = (void *)(*data)->data; - if (!trans->trans_cfg->gen2) + if (!trans->mac_cfg->gen2) for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); @@ -3478,7 +3471,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, { u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; write_ptr = DBGC_CUR_DBGBUF_STATUS; @@ -3498,7 +3491,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; @@ -3518,8 +3511,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, if (trans->dbg.dest_tlv || (fw_mon->size && - (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || - trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { + (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 || + trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); @@ -3542,14 +3535,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; - base += trans->cfg->smem_offset; + base += trans->mac_cfg->base->smem_offset; } else { base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; } - iwl_trans_read_mem(trans, base, fw_mon_data->data, - monitor_len / sizeof(u32)); + iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { monitor_len = iwl_trans_pci_dump_marbh_monitor(trans, @@ -3583,7 +3576,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; - base += 
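
The debugfs reset handler above gains entries for the sw, reprobe and top keywords and then branches on the parsed mode. The matching helper itself is not visible in this hunk, so the sketch below only shows the generic pattern of resolving a written keyword against a designated-initializer name table; the enum and the trimmed mode list are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>

enum reset_mode {
        RESET_MODE_SW,
        RESET_MODE_REPROBE,
        RESET_MODE_TOP,
        RESET_MODE_REMOVE_ONLY,
        RESET_MODE_RESCAN,
        RESET_MODE_NUM,
};

/* Designated initializers keep the name table in step with the enum. */
static const char *const mode_names[RESET_MODE_NUM] = {
        [RESET_MODE_SW]          = "sw",
        [RESET_MODE_REPROBE]     = "reprobe",
        [RESET_MODE_TOP]         = "top",
        [RESET_MODE_REMOVE_ONLY] = "remove",
        [RESET_MODE_RESCAN]      = "rescan",
};

static int parse_mode(const char *buf)
{
        int i;

        for (i = 0; i < RESET_MODE_NUM; i++)
                if (mode_names[i] && strcmp(buf, mode_names[i]) == 0)
                        return i;
        return -1;              /* unknown keyword */
}

int main(void)
{
        printf("\"top\" -> %d, \"bogus\" -> %d\n",
               parse_mode("top"), parse_mode("bogus"));
        return 0;
}
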
trans->cfg->smem_offset; + base += trans->mac_cfg->base->smem_offset; monitor_len = (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> @@ -3599,7 +3592,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) trans->dbg.dest_tlv->end_shift; /* Make "end" point to the actual end */ - if (trans->trans_cfg->device_family >= + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000 || trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg.dest_tlv->end_shift); @@ -3620,13 +3613,13 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && - !trans->trans_cfg->mq_rx_supported && + !trans->mac_cfg->mq_rx_supported && dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); if (!dump_mask) @@ -3651,7 +3644,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, /* FH registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) len += sizeof(*data) + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); @@ -3676,7 +3669,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, } /* Paged memory for gen2 HW */ - if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) + if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -3701,7 +3694,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, u8 tfdidx; u32 caplen, cmdlen; - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) tfdidx = idx; else tfdidx = ptr; @@ -3741,7 +3734,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ - if (trans->trans_cfg->gen2 && + if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; @@ -3781,7 +3774,7 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) if (trans_pcie->msix_enabled) { inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; else sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; @@ -3793,12 +3786,14 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); } -struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct iwl_cfg_trans_params *cfg_trans) +struct iwl_trans * +iwl_trans_pcie_alloc(struct pci_dev *pdev, + const struct iwl_mac_cfg *mac_cfg, + struct iwl_trans_info *info) { struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; + unsigned int bc_tbl_n_entries; int ret, addr_size; u32 bar0; @@ -3815,13 +3810,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, return ERR_PTR(ret); trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, - 
cfg_trans); + mac_cfg); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->trans_cfg->gen2) { + /* Initialize the wait queue for commands */ + init_waitqueue_head(&trans_pcie->wait_command_queue); + + if (trans->mac_cfg->gen2) { trans_pcie->txqs.tfd.addr_size = 64; trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); @@ -3830,10 +3828,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd); } - trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); - /* Set a short watchdog for the command queue */ - trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT; + trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11); + + info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans_pcie->txqs.tso_hdr_page) { @@ -3841,20 +3841,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_free_trans; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - trans_pcie->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - trans_pcie->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ; + else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210; else - trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + bc_tbl_n_entries = TFD_QUEUE_BC_SIZE; + + trans_pcie->txqs.bc_tbl_size = + sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries; /* * For gen2 devices, we use a single allocation for each byte-count * table, but they're pretty small (1k) so use a DMA pool that we * allocate here. */ - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { trans_pcie->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, trans_pcie->txqs.bc_tbl_size, @@ -3867,7 +3868,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Some things must not change even if the config does */ WARN_ON(trans_pcie->txqs.tfd.addr_size != - (trans->trans_cfg->gen2 ? 64 : 36)); + (trans->mac_cfg->gen2 ? 64 : 36)); /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. @@ -3901,7 +3902,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->debug_rfkill = -1; - if (!cfg_trans->base_params->pcie_l1_allowed) { + if (!mac_cfg->base->pcie_l1_allowed) { /* * W/A - seems to solve weird behavior. We need to remove this * if we don't want to stay in L1 all the time. This wastes a @@ -3945,8 +3946,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - trans->hw_rev = iwl_read32(trans, CSR_HW_REV); - if (trans->hw_rev == 0xffffffff) { + info->hw_rev = iwl_read32(trans, CSR_HW_REV); + if (info->hw_rev == 0xffffffff) { dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); ret = -EIO; goto out_no_pci; @@ -3958,17 +3959,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * "dash" value). 
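
With the gen3-specific byte-count structures gone, the allocation hunk above sizes every byte-count table as one common entry type times a per-family entry count. A standalone sketch of that sizing; the entry counts below are placeholders, not the real TFD_QUEUE_BC_SIZE_* values.

#include <stdio.h>
#include <stdint.h>

/* Placeholder entry counts, not the real TFD_QUEUE_BC_SIZE_* values. */
#define BC_ENTRIES_LEGACY       512
#define BC_ENTRIES_AX210        1024
#define BC_ENTRIES_BZ           4096

/* One entry type for every generation, as in the new struct iwl_bc_tbl_entry. */
struct bc_tbl_entry {
        uint16_t tfd_offset;
};

enum device_family { FAMILY_LEGACY, FAMILY_AX210, FAMILY_BZ };

static size_t bc_tbl_size(enum device_family family)
{
        unsigned int n_entries;

        if (family >= FAMILY_BZ)
                n_entries = BC_ENTRIES_BZ;
        else if (family >= FAMILY_AX210)
                n_entries = BC_ENTRIES_AX210;
        else
                n_entries = BC_ENTRIES_LEGACY;

        return sizeof(struct bc_tbl_entry) * n_entries;
}

int main(void)
{
        printf("BZ byte-count table: %zu bytes\n", bc_tbl_size(FAMILY_BZ));
        return 0;
}
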
To keep hw_rev backwards compatible - we'll store it * in the old format. */ - if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) - trans->hw_rev_step = trans->hw_rev & 0xF; + if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) + info->hw_rev_step = info->hw_rev & 0xF; else - trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; + info->hw_rev_step = (info->hw_rev & 0xC) >> 2; - IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); + IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev); - iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); - trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; - snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), - "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); + iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info); init_waitqueue_head(&trans_pcie->sx_waitq); @@ -3977,7 +3975,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_no_pci; if (trans_pcie->msix_enabled) { - ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); + ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info); if (ret) goto out_no_pci; } else { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 71227fd3dac0..df0545f09da9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -18,13 +18,12 @@ static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_page_info *info; struct page **page_ptr; struct page *ret; dma_addr_t phys; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs); ret = alloc_page(GFP_ATOMIC); if (!ret) @@ -164,7 +163,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET - struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; @@ -491,21 +490,21 @@ struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, bool amsdu; /* There must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE); BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen2, dram_info) > + offsetofend(struct iwl_tx_cmd_v9, dram_info) > IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen3, dram_info) > + offsetofend(struct iwl_tx_cmd, dram_info) > IWL_FIRST_TB_SIZE); memset(tfd, 0, sizeof(*tfd)); - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) - len = sizeof(struct iwl_tx_cmd_gen2); + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + len = sizeof(struct iwl_tx_cmd_v9); else - len = sizeof(struct iwl_tx_cmd_gen3); + len = sizeof(struct iwl_tx_cmd); amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & @@ -536,17 +535,17 @@ int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) * If q->n_window is smaller than 
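
The hw_rev handling above keeps the old split: family 8000 and later store the silicon step in the low nibble of CSR_HW_REV, earlier families only in bits 2-3, and the result now lands in info->hw_rev_step. A standalone restatement of the bit extraction; the enum values are arbitrary.

#include <stdio.h>

enum device_family { FAMILY_7000 = 7000, FAMILY_8000 = 8000 };

/*
 * The silicon step sits in different CSR_HW_REV bits depending on the
 * generation: low nibble from family 8000 on, bits 2-3 before that.
 */
static unsigned int hw_rev_step(enum device_family family, unsigned int hw_rev)
{
        if (family >= FAMILY_8000)
                return hw_rev & 0xF;
        return (hw_rev & 0xC) >> 2;
}

int main(void)
{
        printf("step for hw_rev 0x354: %u\n", hw_rev_step(FAMILY_8000, 0x354));
        return 0;
}
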
max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ - if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) + if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size) max = q->n_window; else - max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; + max = trans->mac_cfg->base->max_tfd_queue_size - 1; /* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined. */ used = (q->write_ptr - q->read_ptr) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + (trans->mac_cfg->base->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; @@ -561,8 +560,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr; u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; @@ -582,24 +581,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; - - /* Starting from AX210, the HW expects bytes */ - WARN_ON(trans_pcie->txqs.bc_table_dword); + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { WARN_ON(len > 0x3FFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); - scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; } else { - struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - - /* Before AX210, the HW expects DW */ - WARN_ON(!trans_pcie->txqs.bc_table_dword); len = DIV_ROUND_UP(len, 4); WARN_ON(len > 0xFFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; } + + scd_bc_tbl[idx].tfd_offset = bc_ent; } static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd) @@ -756,7 +747,8 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans_pcie->txqs.dev_cmd_offs); + trans->conf.cb_data_offs + + sizeof(void *)); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -785,16 +777,16 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, return -1; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - cmd_len = le16_to_cpu(tx_cmd_gen3->len); + cmd_len = le16_to_cpu(tx_cmd->len); } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + struct iwl_tx_cmd_v9 *tx_cmd_v9 = (void *)dev_cmd->payload; - cmd_len = le16_to_cpu(tx_cmd_gen2->len); + cmd_len = le16_to_cpu(tx_cmd_v9->len); } /* Set up entry for this TFD in Tx byte-count array */ @@ -832,7 +824,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans_pcie->txqs.cmd.q_id) { + if (txq_id != trans->conf.cmd_queue) { int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta; struct sk_buff *skb = txq->entries[idx].skb; @@ -906,7 +898,7 @@ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) iwl_txq_gen2_unmap(trans, txq_id); /* De-alloc array 
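
iwl_pcie_gen2_update_byte_tbl() above now writes one common iwl_bc_tbl_entry, but still encodes the length per generation: AX210 and later take bytes with the fetch-chunk count in bits 14-15, older devices take dwords with the count in bits 12-15 (the driver additionally warns when the length overflows the field and stores the value little-endian, both omitted here). A standalone sketch of the encoding:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static uint16_t bc_tbl_entry(unsigned int byte_cnt,
                             unsigned int filled_tfd_size,
                             int ax210_or_later)
{
        unsigned int num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
        unsigned int len = byte_cnt;

        if (ax210_or_later)
                return (uint16_t)(len | (num_fetch_chunks << 14));

        len = DIV_ROUND_UP(len, 4);     /* older hardware counts dwords */
        return (uint16_t)(len | (num_fetch_chunks << 12));
}

int main(void)
{
        printf("AX210 entry for 1500 bytes: 0x%04x\n",
               bc_tbl_entry(1500, 128, 1));
        return 0;
}
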
of command/tx buffers */ - if (txq_id == trans_pcie->txqs.cmd.q_id) + if (txq_id == trans->conf.cmd_queue) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); @@ -1007,7 +999,7 @@ static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans, txq->id = qid; trans_pcie->txqs.txq[qid] = txq; - wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; @@ -1043,8 +1035,8 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, /* but must be power of 2 values for calculating read/write pointers */ size = rounddown_pow_of_two(size); - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && - trans->hw_rev_step == SILICON_A_STEP) { + if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ && + trans->info.hw_rev_step == SILICON_A_STEP) { size = 4096; txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); } else { @@ -1064,7 +1056,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, if (IS_ERR(txq)) return PTR_ERR(txq); - if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) { + if (trans->conf.queue_alloc_cmd_ver == 0) { memset(&cmd.old, 0, sizeof(cmd.old)); cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); @@ -1081,7 +1073,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, hcmd.id = SCD_QUEUE_CFG; hcmd.len[0] = sizeof(cmd.old); hcmd.data[0] = &cmd.old; - } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) { + } else if (trans->conf.queue_alloc_cmd_ver == 3) { memset(&cmd.new, 0, sizeof(cmd.new)); cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); @@ -1176,7 +1168,7 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) } ret = iwl_txq_init(trans, queue, queue_size, - (txq_id == trans_pcie->txqs.cmd.q_id)); + (txq_id == trans->conf.cmd_queue)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; @@ -1206,7 +1198,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -1323,7 +1315,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1371,7 +1363,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 
9fc4e98693eb..7abd7c7daa89 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -78,7 +78,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; @@ -90,8 +89,8 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * 2. NIC is woken up for CMD regardless of shadow outside this function * 3. there is a chance that the NIC is asleep */ - if (!trans->trans_cfg->base_params->shadow_reg_enable && - txq_id != trans_pcie->txqs.cmd.q_id && + if (!trans->mac_cfg->base->shadow_reg_enable && + txq_id != trans->conf.cmd_queue && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... @@ -125,7 +124,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txqs.txq[i]; if (!test_bit(i, trans_pcie->txqs.queue_used)) @@ -193,7 +192,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + if (!trans->mac_cfg->base->apmg_wake_up_wa) return; spin_lock(&trans_pcie->reg_lock); @@ -226,11 +225,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans, void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct page **page_ptr; struct page *next; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs); next = *page_ptr; *page_ptr = NULL; @@ -280,10 +278,12 @@ iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, struct iwl_tfd *tfd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + tfd->num_tbs = 0; - iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); + iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma, + trans_pcie->invalid_tx_cmd.size); } static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, @@ -355,7 +355,7 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_txq_get_tfd(trans, txq, read_ptr)); else @@ -394,7 +394,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans_pcie->txqs.cmd.q_id) { + if (txq_id != trans->conf.cmd_queue) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; struct iwl_cmd_meta *cmd_meta = &txq->entries[txq->read_ptr].meta; @@ -408,7 +408,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr && - txq_id == trans_pcie->txqs.cmd.q_id) + txq_id == trans->conf.cmd_queue) 
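
Several hunks in these files replace the private page_offs/dev_cmd_offs fields with trans->conf.cb_data_offs: the TSO page pointer sits at that offset inside skb->cb and the device-command pointer directly after it, one pointer further. Below is a standalone model of stashing and recovering pointers at a configured offset in an opaque control buffer; memcpy() stands in for the driver's direct casts, and all names are invented.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the skb->cb scratch area. */
struct packet {
        char cb[48];
};

static void stash(struct packet *pkt, size_t cb_data_offs,
                  void *tso_page, void *dev_cmd)
{
        /* Two driver-private pointers live back to back at the given offset. */
        assert(cb_data_offs + 2 * sizeof(void *) <= sizeof(pkt->cb));
        memcpy(pkt->cb + cb_data_offs, &tso_page, sizeof(void *));
        memcpy(pkt->cb + cb_data_offs + sizeof(void *), &dev_cmd, sizeof(void *));
}

static void *stashed_page(struct packet *pkt, size_t cb_data_offs)
{
        void *p;

        memcpy(&p, pkt->cb + cb_data_offs, sizeof(void *));
        return p;
}

int main(void)
{
        struct packet pkt = { { 0 } };
        int page, cmd;

        stash(&pkt, 8, &page, &cmd);
        printf("page pointer round-trips: %s\n",
               stashed_page(&pkt, 8) == (void *)&page ? "yes" : "no");
        return 0;
}
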
iwl_pcie_clear_cmd_in_flight(trans); } @@ -446,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->txqs.cmd.q_id) + if (txq_id == trans->conf.cmd_queue) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); @@ -456,7 +456,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) if (txq->tfds) { dma_free_coherent(dev, trans_pcie->txqs.tfd.size * - trans->trans_cfg->base_params->max_tfd_queue_size, + trans->mac_cfg->base->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; @@ -475,10 +475,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) memset(txq, 0, sizeof(*txq)); } -void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) +void iwl_pcie_tx_start(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int nq = trans->trans_cfg->base_params->num_of_queues; + int nq = trans->mac_cfg->base->num_of_queues; int chan; u32 reg_val; int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - @@ -493,13 +493,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); - WARN_ON(scd_base_addr != 0 && - scd_base_addr != trans_pcie->scd_base_addr); - /* reset context data, TX status and translation data */ - iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_MEM_LOWER_BOUND, - NULL, clear_dwords); + iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_MEM_LOWER_BOUND, + NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->txqs.scd_bc_tbls.dma >> 10); @@ -507,12 +504,12 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. 
*/ - if (trans->trans_cfg->base_params->scd_chain_ext_wa) + if (trans->mac_cfg->base->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, - trans_pcie->txqs.cmd.fifo, - trans_pcie->txqs.cmd.wdg_timeout); + iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue, + trans->conf.cmd_fifo, + IWL_DEF_WD_TIMEOUT); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); @@ -529,7 +526,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } @@ -543,13 +540,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) * we should never get here in gen2 trans mode return early to avoid * having invalid accesses */ - if (WARN_ON_ONCE(trans->trans_cfg->gen2)) + if (WARN_ON_ONCE(trans->mac_cfg->gen2)) return; - for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues; txq_id++) { struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); @@ -571,7 +568,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will * contain garbage. */ - iwl_pcie_tx_start(trans, 0); + iwl_pcie_tx_start(trans); } static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) @@ -633,7 +630,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) return 0; /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues; txq_id++) iwl_pcie_txq_unmap(trans, txq_id); @@ -656,7 +653,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) /* Tx queues */ if (trans_pcie->txq_memory) { for (txq_id = 0; - txq_id < trans->trans_cfg->base_params->num_of_queues; + txq_id < trans->mac_cfg->base->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); trans_pcie->txqs.txq[txq_id] = NULL; @@ -678,7 +675,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) bool active; u8 fifo; - if (trans->trans_cfg->gen2) { + if (trans->mac_cfg->gen2) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ @@ -695,9 +692,9 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + (trans->mac_cfg->base->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + (trans->mac_cfg->base->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } @@ -723,8 +720,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t num_entries = trans->trans_cfg->gen2 ? - slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; + size_t num_entries = trans->mac_cfg->gen2 ? 
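
For reference, the read/write-pointer masking in iwl_txq_log_scd_error() just above and the free-space logic in iwl_txq_space() in the earlier tx-gen2.c hunk both rely on max_tfd_queue_size being a power of two: subtraction plus an AND gives the occupancy, and one slot stays reserved so a full ring can be told apart from an empty one unless the window is smaller anyway. A standalone restatement:

#include <stdio.h>

static unsigned int txq_space(unsigned int write_ptr, unsigned int read_ptr,
                              unsigned int n_window, unsigned int max_queue_size)
{
        unsigned int max, used;

        if (n_window < max_queue_size)
                max = n_window;
        else
                max = max_queue_size - 1;

        /* max_queue_size is a power of two, so this is a cheap modulo. */
        used = (write_ptr - read_ptr) & (max_queue_size - 1);

        if (used > max)
                return 0;
        return max - used;
}

int main(void)
{
        printf("free slots: %u\n", txq_space(250, 10, 256, 256));
        return 0;
}
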
+ slots_num : trans->mac_cfg->base->max_tfd_queue_size; size_t tfd_sz; size_t tb0_buf_sz; int i; @@ -779,7 +776,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, for (i = 0; i < num_entries; i++) { void *tfd = iwl_txq_get_tfd(trans, txq, i); - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_txq_set_tfd_invalid_gen2(trans, tfd); else iwl_txq_set_tfd_invalid_gen1(trans, tfd); @@ -799,6 +796,8 @@ error: return -ENOMEM; } +#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE) + /* * iwl_pcie_tx_alloc - allocate TX context * Allocate all Tx DMA structures and initialize them @@ -808,12 +807,12 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; + u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues; - if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) + if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) return -EINVAL; - bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); + bc_tbls_size *= BC_TABLE_SIZE; /*It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ @@ -837,7 +836,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) } trans_pcie->txq_memory = - kcalloc(trans->trans_cfg->base_params->num_of_queues, + kcalloc(trans->mac_cfg->base->num_of_queues, sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); @@ -846,16 +845,16 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans->conf.cmd_queue); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, - trans->cfg->min_txq_size); + trans->mac_cfg->base->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, - trans->cfg->min_ba_txq_size); + trans->mac_cfg->base->min_ba_txq_size); trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], slots_num, cmd_queue); @@ -905,7 +904,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { u32 tfd_queue_max_size = - trans->trans_cfg->base_params->max_tfd_queue_size; + trans->mac_cfg->base->max_tfd_queue_size; int ret; txq->need_update = false; @@ -963,16 +962,16 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) spin_unlock_bh(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans->conf.cmd_queue); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, - trans->cfg->min_txq_size); + trans->mac_cfg->base->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, - trans->cfg->min_ba_txq_size); + trans->mac_cfg->base->min_ba_txq_size); ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { @@ -991,7 +990,7 @@ int iwl_pcie_tx_init(struct 
iwl_trans *trans) } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); - if (trans->trans_cfg->base_params->num_of_queues > 20) + if (trans->mac_cfg->base->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1012,7 +1011,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; - if (!trans->trans_cfg->base_params->apmg_wake_up_wa) + if (!trans->mac_cfg->base->apmg_wake_up_wa) return 0; /* @@ -1090,12 +1089,12 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) idx = iwl_txq_get_cmd_index(txq, idx); r = iwl_txq_get_cmd_index(txq, txq->read_ptr); - if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || + if (idx >= trans->mac_cfg->base->max_tfd_queue_size || (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, - trans->trans_cfg->base_params->max_tfd_queue_size, + trans->mac_cfg->base->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } @@ -1164,15 +1163,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans_pcie->txqs.cmd.q_id && - trans_pcie->scd_set_active) + if (txq_id == trans->conf.cmd_queue && + trans->conf.scd_set_active) iwl_scd_enable_set_active(trans, 0); /* Stop this Tx queue before configuring it */ iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans_pcie->txqs.cmd.q_id) + if (txq_id != trans->conf.cmd_queue) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { @@ -1206,7 +1205,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, * this sad hardware issue. * This bug has been fixed on devices 9000 and up. 
*/ - scd_bug = !trans->trans_cfg->mq_rx_supported && + scd_bug = !trans->mac_cfg->mq_rx_supported && !((ssn - txq->write_ptr) & 0x3f) && (ssn != txq->write_ptr); if (scd_bug) @@ -1242,8 +1241,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ - if (txq_id == trans_pcie->txqs.cmd.q_id && - trans_pcie->scd_set_active) + if (txq_id == trans->conf.cmd_queue && + trans->conf.scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); IWL_DEBUG_TX_QUEUES(trans, @@ -1293,8 +1292,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, if (configure_scd) { iwl_scd_txq_set_inactive(trans, txq_id); - iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val, - ARRAY_SIZE(zero_val)); + iwl_trans_pcie_write_mem(trans, stts_addr, + (const void *)zero_val, + ARRAY_SIZE(zero_val)); } iwl_pcie_txq_unmap(trans, txq_id); @@ -1310,10 +1310,10 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) { struct iwl_txq *txq = trans_pcie->txqs.txq[i]; - if (i == trans_pcie->txqs.cmd.q_id) + if (i == trans->conf.cmd_queue) continue; /* we skip the command queue (obviously) so it's OK to nest */ @@ -1346,7 +1346,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -1361,7 +1361,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; unsigned long flags; - if (WARN(!trans->wide_cmd_header && + if (WARN(!trans->conf.wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, "unsupported wide command %#x\n", cmd->id)) return -EINVAL; @@ -1475,7 +1475,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1483,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; @@ -1534,7 +1534,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1633,14 +1633,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *txq = 
trans_pcie->txqs.txq[trans->conf.cmd_queue]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ - if (WARN(txq_id != trans_pcie->txqs.cmd.q_id, + if (WARN(txq_id != trans->conf.cmd_queue, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr, + txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; @@ -1654,7 +1654,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, group_id = cmd->hdr.group_id; cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) iwl_txq_gen2_tfd_unmap(trans, meta, iwl_txq_get_tfd(trans, txq, index)); else @@ -1683,7 +1683,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd_id)); - wake_up(&trans->wait_command_queue); + wake_up(&trans_pcie->wait_command_queue); } meta->flags = 0; @@ -1753,7 +1753,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, dma_addr_t phys; void *ret; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs); if (WARN_ON(*page_ptr)) return NULL; @@ -1912,7 +1912,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 tb1_len) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; @@ -2067,14 +2067,14 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, int num_tbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwlagn_scd_bc_tbl *scd_bc_tbl; + struct iwl_bc_tbl_entry *scd_bc_tbl; int write_ptr = txq->write_ptr; int txq_id = txq->id; u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; @@ -2092,7 +2092,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } - if (trans_pcie->txqs.bc_table_dword) + + if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) @@ -2100,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, bc_ent = cpu_to_le16(len | (sta_id << 12)); - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent; if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = + scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset = bc_ent; } @@ -2112,7 +2113,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct 
ieee80211_hdr *hdr; - struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; + struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; dma_addr_t tb0_phys, tb1_phys, scratch_phys; @@ -2153,7 +2154,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans_pcie->txqs.dev_cmd_offs); + trans->conf.cb_data_offs + + sizeof(void *)); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -2184,7 +2186,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); + offsetof(struct iwl_tx_cmd_v6, scratch); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); @@ -2199,7 +2201,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ - len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + + len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ amsdu = ieee80211_is_data_qos(fc) && @@ -2222,9 +2224,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, IWL_FIRST_TB_SIZE, true); /* there must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE); BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd, scratch) > + offsetofend(struct iwl_tx_cmd_v6, scratch) > IWL_FIRST_TB_SIZE); /* map the data for TB1 */ @@ -2312,24 +2314,24 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, int read_ptr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; + struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; int txq_id = txq->id; u8 sta_id = 0; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - if (txq_id != trans_pcie->txqs.cmd.q_id) + if (txq_id != trans->conf.cmd_queue) sta_id = tx_cmd->sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent; if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = + scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset = bc_ent; } @@ -2343,7 +2345,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, int txq_read_ptr, txq_write_ptr; /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) + if (WARN_ON(txq_id == trans->conf.cmd_queue)) return; if (WARN_ON(!txq)) @@ -2385,7 +2387,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, - 
trans->trans_cfg->base_params->max_tfd_queue_size, + trans->mac_cfg->base->max_tfd_queue_size, txq_write_ptr, txq_read_ptr); iwl_op_mode_time_point(trans->op_mode, @@ -2414,7 +2416,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, txq->entries[read_ptr].skb = NULL; - if (!trans->trans_cfg->gen2) + if (!trans->mac_cfg->gen2) iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq, txq_read_ptr); @@ -2456,7 +2458,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans_pcie->txqs.dev_cmd_offs); + trans->conf.cb_data_offs + + sizeof(void *)); /* * Note that we can very well be overflowing again. @@ -2548,11 +2551,11 @@ next_queue: #define HOST_COMPLETE_TIMEOUT (2 * HZ) static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) + struct iwl_host_cmd *cmd, + const char *cmd_str) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); - struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; int cmd_idx; int ret; @@ -2565,7 +2568,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); else cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); @@ -2578,7 +2581,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, return ret; } - ret = wait_event_timeout(trans->wait_command_queue, + ret = wait_event_timeout(trans_pcie->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); @@ -2594,7 +2597,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, cmd_str); ret = -ETIMEDOUT; - iwl_trans_sync_nmi(trans); + iwl_trans_pcie_sync_nmi(trans); goto cancel; } @@ -2645,6 +2648,8 @@ cancel: int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { + const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); + /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; @@ -2656,20 +2661,16 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, return -ERFKILL; } - if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && - !(cmd->flags & CMD_SEND_IN_D3))) { - IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); - return -EHOSTDOWN; - } - if (cmd->flags & CMD_ASYNC) { int ret; + IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str); + /* An asynchronous command can not expect an SKB to be set. 
*/ if (WARN_ON(cmd->flags & CMD_WANT_SKB)) return -EINVAL; - if (trans->trans_cfg->gen2) + if (trans->mac_cfg->gen2) ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); else ret = iwl_pcie_enqueue_hcmd(trans, cmd); @@ -2683,6 +2684,5 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, return 0; } - return iwl_trans_pcie_send_hcmd_sync(trans, cmd); + return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str); } -IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index d0bda23c628a..784433bb246a 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -2,7 +2,7 @@ /* * KUnit tests for the iwlwifi device info table * - * Copyright (C) 2023-2024 Intel Corporation + * Copyright (C) 2023-2025 Intel Corporation */ #include <kunit/test.h> #include <linux/pci.h> @@ -13,10 +13,50 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) { - printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", - pfx, di->device, di->subdevice, di->mac_type, di->mac_step, - di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, - di->cores); + u16 subdevice_mask = GENMASK(di->subdevice_m_h, di->subdevice_m_l); + char buf[100] = {}; + int pos = 0; + + if (di->match_rf_type) + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_type=%03x", di->rf_type); + else + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_type=*"); + + if (di->match_bw_limit) + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " bw_limit=%d", di->bw_limit); + else + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " bw_limit=*"); + + if (di->match_rf_step) + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_step=%c", + di->rf_step == SILICON_Z_STEP ? 
'Z' : + 'A' + di->rf_step); + else + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_step=*"); + + if (di->match_rf_id) + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_id=0x%x", di->rf_id); + else + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " rf_id=*"); + + if (di->match_cdb) + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " cdb=%d", di->cdb); + else + pos += scnprintf(buf + pos, sizeof(buf) - pos, + " cdb=*"); + + + printk(KERN_DEBUG "%sdev=%04x subdev=%04x/%04x%s\n", + pfx, di->device, di->subdevice, subdevice_mask, buf); } static void devinfo_table_order(struct kunit *test) @@ -28,11 +68,14 @@ static void devinfo_table_order(struct kunit *test) const struct iwl_dev_info *ret; ret = iwl_pci_find_dev_info(di->device, di->subdevice, - di->mac_type, di->mac_step, di->rf_type, di->cdb, - di->jacket, di->rf_id, - di->no_160, di->cores, di->rf_step); - if (ret != di) { + di->rf_id, di->bw_limit, + di->rf_step); + if (!ret) { + iwl_pci_print_dev_info("No entry found for: ", di); + KUNIT_FAIL(test, + "No entry found for entry at index %d\n", idx); + } else if (ret != di) { iwl_pci_print_dev_info("searched: ", di); iwl_pci_print_dev_info("found: ", ret); KUNIT_FAIL(test, @@ -42,6 +85,92 @@ static void devinfo_table_order(struct kunit *test) } } +static void devinfo_names(struct kunit *test) +{ + int idx; + + for (idx = 0; idx < iwl_dev_info_table_size; idx++) { + const struct iwl_dev_info *di = &iwl_dev_info_table[idx]; + + KUNIT_ASSERT_TRUE(test, di->name); + } +} + +static void devinfo_no_cfg_dups(struct kunit *test) +{ + for (int i = 0; i < iwl_dev_info_table_size; i++) { + const struct iwl_rf_cfg *cfg_i = iwl_dev_info_table[i].cfg; + + for (int j = 0; j < i; j++) { + const struct iwl_rf_cfg *cfg_j = iwl_dev_info_table[j].cfg; + + if (cfg_i == cfg_j) + continue; + + KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_i, cfg_j, + sizeof(*cfg_i)), 0, + "identical configs: %ps and %ps\n", + cfg_i, cfg_j); + } + } +} + +static void devinfo_no_name_dups(struct kunit *test) +{ + for (int i = 0; i < iwl_dev_info_table_size; i++) { + for (int j = 0; j < i; j++) { + if (iwl_dev_info_table[i].name == iwl_dev_info_table[j].name) + continue; + + KUNIT_EXPECT_NE_MSG(test, + strcmp(iwl_dev_info_table[i].name, + iwl_dev_info_table[j].name), + 0, + "name dup: %ps/%ps", + iwl_dev_info_table[i].name, + iwl_dev_info_table[j].name); + } + } +} + +static void devinfo_check_subdev_match(struct kunit *test) +{ + for (int i = 0; i < iwl_dev_info_table_size; i++) { + const struct iwl_dev_info *di = &iwl_dev_info_table[i]; + u16 subdevice_mask = GENMASK(di->subdevice_m_h, + di->subdevice_m_l); + + /* if BW limit bit is matched then must have a limit */ + if (di->match_bw_limit == 1 && di->bw_limit == 1) + KUNIT_EXPECT_NE(test, di->cfg->bw_limit, 0); + + /* if subdevice is ANY we can have RF ID/BW limit */ + if (di->subdevice == (u16)IWL_CFG_ANY) + continue; + + /* same if the subdevice mask doesn't overlap them */ + if (IWL_SUBDEVICE_RF_ID(subdevice_mask) == 0 && + IWL_SUBDEVICE_BW_LIM(subdevice_mask) == 0) + continue; + + /* but otherwise they shouldn't be used */ + KUNIT_EXPECT_EQ(test, (int)di->match_rf_id, 0); + KUNIT_EXPECT_EQ(test, (int)di->match_bw_limit, 0); + } +} + +static void devinfo_check_killer_subdev(struct kunit *test) +{ + for (int i = 0; i < iwl_dev_info_table_size; i++) { + const struct iwl_dev_info *di = &iwl_dev_info_table[i]; + + if (!strstr(di->name, "Killer")) + continue; + + KUNIT_EXPECT_NE(test, di->subdevice, (u16)IWL_CFG_ANY); + } +} + static void devinfo_pci_ids(struct kunit *test) 
{ struct pci_dev *dev; @@ -64,9 +193,36 @@ static void devinfo_pci_ids(struct kunit *test) } } +static void devinfo_no_mac_cfg_dups(struct kunit *test) +{ + for (int i = 0; iwl_hw_card_ids[i].vendor; i++) { + const struct iwl_mac_cfg *cfg_i = + (void *)iwl_hw_card_ids[i].driver_data; + + for (int j = 0; j < i; j++) { + const struct iwl_mac_cfg *cfg_j = + (void *)iwl_hw_card_ids[j].driver_data; + + if (cfg_i == cfg_j) + continue; + + KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_j, cfg_i, + sizeof(*cfg_i)), 0, + "identical configs: %ps and %ps\n", + cfg_i, cfg_j); + } + } +} + static struct kunit_case devinfo_test_cases[] = { KUNIT_CASE(devinfo_table_order), + KUNIT_CASE(devinfo_names), + KUNIT_CASE(devinfo_no_cfg_dups), + KUNIT_CASE(devinfo_no_name_dups), + KUNIT_CASE(devinfo_check_subdev_match), + KUNIT_CASE(devinfo_check_killer_subdev), KUNIT_CASE(devinfo_pci_ids), + KUNIT_CASE(devinfo_no_mac_cfg_dups), {} }; diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c index 772084a9bd8d..3baf8ab01e22 100644 --- a/drivers/net/wireless/intersil/p54/fwio.c +++ b/drivers/net/wireless/intersil/p54/fwio.c @@ -231,6 +231,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, mutex_lock(&priv->eeprom_mutex); priv->eeprom = buf; + priv->eeprom_slice_size = len; eeprom_hdr = skb_put(skb, eeprom_hdr_size + len); if (priv->fw_var < 0x509) { @@ -253,6 +254,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, ret = -EBUSY; } priv->eeprom = NULL; + priv->eeprom_slice_size = 0; mutex_unlock(&priv->eeprom_mutex); return ret; } diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h index 522656de4159..aeb5e40cc5ef 100644 --- a/drivers/net/wireless/intersil/p54/p54.h +++ b/drivers/net/wireless/intersil/p54/p54.h @@ -258,6 +258,7 @@ struct p54_common { /* eeprom handling */ void *eeprom; + size_t eeprom_slice_size; struct completion eeprom_comp; struct mutex eeprom_mutex; }; diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 8414aa208655..2deb1bb54f24 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -496,14 +496,19 @@ static void p54_rx_eeprom_readback(struct p54_common *priv, return ; if (priv->fw_var >= 0x509) { - memcpy(priv->eeprom, eeprom->v2.data, - le16_to_cpu(eeprom->v2.len)); + if (le16_to_cpu(eeprom->v2.len) != priv->eeprom_slice_size) + return; + + memcpy(priv->eeprom, eeprom->v2.data, priv->eeprom_slice_size); } else { - memcpy(priv->eeprom, eeprom->v1.data, - le16_to_cpu(eeprom->v1.len)); + if (le16_to_cpu(eeprom->v1.len) != priv->eeprom_slice_size) + return; + + memcpy(priv->eeprom, eeprom->v1.data, priv->eeprom_slice_size); } priv->eeprom = NULL; + priv->eeprom_slice_size = 0; tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->eeprom_comp); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 1485f949ad4e..7b50a88a18e5 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -913,8 +913,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) multicast = is_multicast_ether_addr(skb->data); - if (unlikely(!multicast && skb->sk && - sock_flag(skb->sk, SOCK_WIFI_STATUS) && + if (unlikely(!multicast && sk_requests_wifi_status(skb->sk) && priv->adapter->fw_api_ver == MWIFIEX_FW_V15)) skb = mwifiex_clone_skb_for_tx_status(priv, skb, diff --git 
a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c index e7b839e74290..cc2d888e3f17 100644 --- a/drivers/net/wireless/mediatek/mt76/channel.c +++ b/drivers/net/wireless/mediatek/mt76/channel.c @@ -302,11 +302,13 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif, struct mt76_vif_link *mlink) { struct mt76_dev *dev = phy->dev; - struct mt76_vif_data *mvif = mlink->mvif; + struct mt76_vif_data *mvif; if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel) return; + mvif = mlink->mvif; + rcu_assign_pointer(mvif->offchannel_link, NULL); dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink); kfree(mlink); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index b88d7e10742e..45c8db939d55 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -449,8 +449,10 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL); - wiphy->available_antennas_tx = phy->antenna_mask; - wiphy->available_antennas_rx = phy->antenna_mask; + if (!wiphy->available_antennas_tx) + wiphy->available_antennas_tx = phy->antenna_mask; + if (!wiphy->available_antennas_rx) + wiphy->available_antennas_rx = phy->antenna_mask; wiphy->sar_capa = &mt76_sar_capa; phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges, @@ -1703,7 +1705,7 @@ s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower) int n_chains = hweight16(phy->chainmask); txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2); - txpower -= mt76_tx_power_nss_delta(n_chains); + txpower -= mt76_tx_power_path_delta(n_chains); return txpower; } @@ -1719,7 +1721,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return -EINVAL; n_chains = hweight16(phy->chainmask); - delta = mt76_tx_power_nss_delta(n_chains); + delta = mt76_tx_power_path_delta(n_chains); *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index d7cd467b812f..5f8d81cda6cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -162,6 +162,16 @@ enum mt76_dfs_state { MT_DFS_STATE_ACTIVE, }; +#define MT76_RNR_SCAN_MAX_BSSIDS 16 +struct mt76_scan_rnr_param { + u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN]; + u8 channel[MT76_RNR_SCAN_MAX_BSSIDS]; + u8 random_mac[ETH_ALEN]; + u8 seq_num; + u8 bssid_num; + u32 sreq_flag; +}; + struct mt76_queue_buf { dma_addr_t addr; u16 len:15, @@ -941,6 +951,8 @@ struct mt76_dev { char alpha2[3]; enum nl80211_dfs_regions region; + struct mt76_scan_rnr_param rnr; + u32 debugfs_reg; u8 csa_complete; @@ -1386,12 +1398,12 @@ static inline bool mt76_is_skb_pktid(u8 pktid) return pktid >= MT_PACKET_ID_FIRST; } -static inline u8 mt76_tx_power_nss_delta(u8 nss) +static inline u8 mt76_tx_power_path_delta(u8 path) { - static const u8 nss_delta[4] = { 0, 6, 9, 12 }; - u8 idx = nss - 1; + static const u8 path_delta[5] = { 0, 6, 9, 12, 14 }; + u8 idx = path - 1; - return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0; + return (idx < ARRAY_SIZE(path_delta)) ? 
path_delta[idx] : 0; } static inline bool mt76_testmode_enabled(struct mt76_phy *phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 66ba3be27343..aae80005a3c1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -273,7 +273,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev, struct ieee80211_supported_band *sband) { int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains; - int delta_idx, delta = mt76_tx_power_nss_delta(n_chains); + int delta_idx, delta = mt76_tx_power_path_delta(n_chains); u8 *eep = (u8 *)dev->mt76.eeprom.data; enum nl80211_band band = sband->band; struct mt76_power_limits limits; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index b8fcd4eb3fbb..4064e193d4de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -2067,7 +2067,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku) }; tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power); - tx_power -= mt76_tx_power_nss_delta(n_chains); + tx_power -= mt76_tx_power_path_delta(n_chains); tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, &limits, tx_power); mphy->txpower_cur = tx_power; @@ -2084,8 +2084,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku) int delta = 0; if (i < n_chains - 1) - delta = mt76_tx_power_nss_delta(n_chains) - - mt76_tx_power_nss_delta(i + 1); + delta = mt76_tx_power_path_delta(n_chains) - + mt76_tx_power_path_delta(i + 1); sku[MT_SKU_1SS_DELTA + i] = delta; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 455979476d11..192dcc374a64 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -232,9 +232,14 @@ static inline bool is_mt7992(struct mt76_dev *dev) return mt76_chip(dev) == 0x7992; } +static inline bool is_mt7990(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7993; +} + static inline bool is_mt799x(struct mt76_dev *dev) { - return is_mt7996(dev) || is_mt7992(dev); + return is_mt7996(dev) || is_mt7992(dev) || is_mt7990(dev); } static inline bool is_mt7622(struct mt76_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 487ad716f872..1013cad57a7f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -273,6 +273,7 @@ enum tx_frag_idx { #define MT_TXD6_TX_RATE GENMASK(21, 16) #define MT_TXD6_TIMESTAMP_OFS_EN BIT(15) #define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10) +#define MT_TXD6_TID_ADDBA GENMASK(10, 8) #define MT_TXD6_MSDU_CNT GENMASK(9, 4) #define MT_TXD6_MSDU_CNT_V2 GENMASK(15, 10) #define MT_TXD6_DIS_MAT BIT(3) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index bafcf5a279e2..cb13d0a76878 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -67,8 +67,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) || (is_mt7921(dev) && addr == 0x900000) || (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) || - 
(is_mt7996(dev) && addr == 0x900000) || - (is_mt7992(dev) && addr == 0x900000)) + (is_mt799x(dev) && addr == 0x900000)) cmd = MCU_CMD(PATCH_START_REQ); else cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ); @@ -391,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); if (vif->type == NL80211_IFTYPE_STATION && - link_conf && !is_zero_ether_addr(link_conf->bssid)) { + !is_zero_ether_addr(link_conf->bssid)) { memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN); basic->aid = cpu_to_le16(vif->cfg.aid); } else { @@ -1667,6 +1666,44 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss); +void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev, + struct cfg80211_scan_request *sreq) +{ + struct ieee80211_channel **scan_list = sreq->channels; + int i, bssid_index = 0; + + /* clear 6G active Scan BSSID table */ + memset(&mdev->rnr, 0, sizeof(mdev->rnr)); + + for (i = 0; i < sreq->n_6ghz_params; i++) { + u8 ch_idx = sreq->scan_6ghz_params[i].channel_idx; + int k = 0; + + /* Remove the duplicated BSSID */ + for (k = 0; k < bssid_index; k++) { + if (!memcmp(&mdev->rnr.bssid[k], + sreq->scan_6ghz_params[i].bssid, + ETH_ALEN)) + break; + } + + if (k == bssid_index && + bssid_index < MT76_RNR_SCAN_MAX_BSSIDS) { + memcpy(&mdev->rnr.bssid[bssid_index++], + sreq->scan_6ghz_params[i].bssid, ETH_ALEN); + mdev->rnr.channel[k] = scan_list[ch_idx]->hw_value; + } + } + + mdev->rnr.bssid_num = bssid_index; + + if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + memcpy(mdev->rnr.random_mac, sreq->mac_addr, ETH_ALEN); + mdev->rnr.sreq_flag = sreq->flags; + } +} +EXPORT_SYMBOL_GPL(mt76_connac_mcu_build_rnr_scan_param); + #define MT76_CONNAC_SCAN_CHANNEL_TIME 60 int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 43237e518373..27daf419560a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -869,6 +869,7 @@ enum { #define NETWORK_WDS BIT(21) #define SCAN_FUNC_RANDOM_MAC BIT(0) +#define SCAN_FUNC_RNR_SCAN BIT(3) #define SCAN_FUNC_SPLIT_SCAN BIT(5) #define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA) @@ -1065,6 +1066,7 @@ enum { MCU_UNI_EVENT_WED_RRO = 0x57, MCU_UNI_EVENT_PER_STA_INFO = 0x6d, MCU_UNI_EVENT_ALL_STA_INFO = 0x6e, + MCU_UNI_EVENT_SDO = 0x83, }; #define MCU_UNI_CMD_EVENT BIT(1) @@ -1182,6 +1184,11 @@ enum { #define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \ FIELD_PREP(__MCU_CMD_FIELD_ID, \ MCU_UNI_CMD_##_t)) + +#define MCU_UNI_QUERY(_t) (__MCU_CMD_FIELD_UNI | __MCU_CMD_FIELD_QUERY | \ + FIELD_PREP(__MCU_CMD_FIELD_ID, \ + MCU_UNI_CMD_##_t)) + #define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \ FIELD_PREP(__MCU_CMD_FIELD_ID, \ MCU_CE_CMD_##_t)) @@ -1287,16 +1294,20 @@ enum { MCU_UNI_CMD_EFUSE_CTRL = 0x2d, MCU_UNI_CMD_RA = 0x2f, MCU_UNI_CMD_MURU = 0x31, + MCU_UNI_CMD_TESTMODE_RX_STAT = 0x32, MCU_UNI_CMD_BF = 0x33, MCU_UNI_CMD_CHANNEL_SWITCH = 0x34, MCU_UNI_CMD_THERMAL = 0x35, MCU_UNI_CMD_VOW = 0x37, MCU_UNI_CMD_FIXED_RATE_TABLE = 0x40, + MCU_UNI_CMD_TESTMODE_CTRL = 0x46, MCU_UNI_CMD_RRO = 0x57, MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58, MCU_UNI_CMD_PER_STA_INFO = 0x6d, MCU_UNI_CMD_ALL_STA_INFO = 0x6e, MCU_UNI_CMD_ASSERT_DUMP = 0x6f, + MCU_UNI_CMD_RADIO_STATUS = 0x80, + MCU_UNI_CMD_SDO = 0x88, }; enum { @@ -1369,6 
+1380,7 @@ enum { UNI_BSS_INFO_OFFLOAD = 25, UNI_BSS_INFO_MLD = 26, UNI_BSS_INFO_PM_DISABLE = 27, + UNI_BSS_INFO_EHT = 30, }; enum { @@ -1969,6 +1981,8 @@ int mt76_connac_mcu_start_patch(struct mt76_dev *dev); int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get); int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option); +void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev, + struct cfg80211_scan_request *sreq); int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req); int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index a82c75ba26e6..a683d53c7ceb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -174,7 +174,6 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget) int mt76x02_dma_init(struct mt76x02_dev *dev) { - struct mt76_txwi_cache __maybe_unused *t; int i, ret, fifo_size; struct mt76_queue *q; void *status_fifo; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 84ef80ab4afb..96cecc576a98 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -17,6 +17,8 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */ + { USB_DEVICE(0x0471, 0x2126) }, /* LiteOn WN4516R module, nonstandard USB connector */ + { USB_DEVICE(0x0471, 0x7600) }, /* LiteOn WN4519R module, nonstandard USB connector */ { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */ { USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 33a14365ec9b..3b5562811511 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -191,6 +191,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct mt76_usb *usb = &dev->mt76.usb; + bool vht; int err; INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate); @@ -217,7 +218,17 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) /* check hw sg support in order to enable AMSDU */ hw->max_tx_fragments = dev->mt76.usb.sg_en ? 
MT_TX_SG_MAX_SIZE : 1; - err = mt76_register_device(&dev->mt76, true, mt76x02_rates, + switch (dev->mt76.rev) { + case 0x76320044: + /* these ASIC revisions do not support VHT */ + vht = false; + break; + default: + vht = true; + break; + } + + err = mt76_register_device(&dev->mt76, vht, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (err) goto fail; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 192e8eff970b..b287b7d9394e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -211,13 +211,28 @@ static const struct file_operations mt7915_sys_recovery_ops = { static int mt7915_radar_trigger(void *data, u64 val) { - struct mt7915_dev *dev = data; +#define RADAR_MAIN_CHAIN 1 +#define RADAR_BACKGROUND 2 + struct mt7915_phy *phy = data; + struct mt7915_dev *dev = phy->dev; + int rdd_idx; + + if (!val || val > RADAR_BACKGROUND) + return -EINVAL; - if (val > MT_RX_SEL2) + if (val == RADAR_BACKGROUND && !dev->rdd2_phy) { + dev_err(dev->mt76.dev, "Background radar is not enabled\n"); return -EINVAL; + } + + rdd_idx = mt7915_get_rdd_idx(phy, val == RADAR_BACKGROUND); + if (rdd_idx < 0) { + dev_err(dev->mt76.dev, "No RDD found\n"); + return -EINVAL; + } return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE, - val, 0, 0); + rdd_idx, 0, 0); } DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, @@ -445,6 +460,11 @@ mt7915_rdd_monitor(struct seq_file *s, void *data) mutex_lock(&dev->mt76.mutex); + if (!mt7915_eeprom_has_background_radar(dev)) { + seq_puts(s, "no background radar capability\n"); + goto out; + } + if (!cfg80211_chandef_valid(chandef)) { ret = -EINVAL; goto out; @@ -1242,7 +1262,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy) if (!dev->dbdc_support || phy->mt76->band_idx) { debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); - debugfs_create_file("radar_trigger", 0200, dir, dev, + debugfs_create_file("radar_trigger", 0200, dir, phy, &fops_radar_trigger); debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir, mt7915_rdd_monitor); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 928e0b07a9bf..c0f3402d30bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -147,7 +147,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev) /* read eeprom data from efuse */ block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size); for (i = 0; i < block_num; i++) { - ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size); + ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size, NULL); if (ret < 0) return ret; } @@ -361,6 +361,37 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band) return val & MT_EE_RATE_DELTA_SIGN ? 
delta : -delta; } +bool +mt7915_eeprom_has_background_radar(struct mt7915_dev *dev) +{ + u8 val, buf[MT7915_EEPROM_BLOCK_SIZE]; + u8 band_sel, tx_path, rx_path; + int offs = MT_EE_WIFI_CONF + 1; + + switch (mt76_chip(&dev->mt76)) { + case 0x7915: + return true; + case 0x7906: + /* read efuse to check background radar capability */ + if (mt7915_mcu_get_eeprom(dev, offs, buf)) + break; + + val = buf[offs % MT7915_EEPROM_BLOCK_SIZE]; + band_sel = u8_get_bits(val, MT_EE_WIFI_CONF0_BAND_SEL); + tx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_TX_PATH); + rx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_RX_PATH); + + return (band_sel == MT_EE_V2_BAND_SEL_5GHZ && + tx_path == rx_path && rx_path == 2); + case 0x7981: + case 0x7986: + default: + break; + } + + return false; +} + const u8 mt7915_sku_group_len[] = { [SKU_CCK] = 4, [SKU_OFDM] = 8, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index 509fb43d8a68..31aec0f40232 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -55,6 +55,7 @@ enum mt7915_eeprom_field { #define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */ #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) +#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3) #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) #define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6) #define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index bee4beabc4eb..3e30ca5155d2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -285,7 +285,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy, { struct mt7915_dev *dev = phy->dev; int i, n_chains = hweight16(phy->mt76->chainmask); - int nss_delta = mt76_tx_power_nss_delta(n_chains); + int path_delta = mt76_tx_power_path_delta(n_chains); int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band); struct mt76_power_limits limits; @@ -305,7 +305,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy, target_power = mt76_get_rate_power_limits(phy->mt76, chan, &limits, target_power); - target_power += nss_delta; + target_power += path_delta; target_power = DIV_ROUND_UP(target_power, 2); chan->max_power = min_t(int, chan->max_reg_power, target_power); @@ -392,9 +392,10 @@ mt7915_init_wiphy(struct mt7915_phy *phy) if (!is_mt7915(&dev->mt76)) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); - if (!mdev->dev->of_node || - !of_property_read_bool(mdev->dev->of_node, - "mediatek,disable-radar-background")) + if (mt7915_eeprom_has_background_radar(phy->dev) && + (!mdev->dev->of_node || + !of_property_read_bool(mdev->dev->of_node, + "mediatek,disable-radar-background"))) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RADAR_BACKGROUND); @@ -924,8 +925,7 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy, c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US; if (!is_mt7915(&dev->mt76)) - c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; elem->phy_cap_info[2] |= c; c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 2ba6eb3038ce..9400e4af2a04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -2035,16 +2035,15 @@ void mt7915_mac_work(struct work_struct *work) static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; + int rdd_idx = mt7915_get_rdd_idx(phy, false); - if (phy->rdd_state & BIT(0)) - mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0, - MT_RX_SEL0, 0); - if (phy->rdd_state & BIT(1)) - mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1, - MT_RX_SEL0, 0); + if (rdd_idx < 0) + return; + + mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0); } -static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) +static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int rdd_idx) { int err, region; @@ -2061,52 +2060,38 @@ static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain) break; } - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain, - MT_RX_SEL0, region); + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region); if (err < 0) return err; if (is_mt7915(&dev->mt76)) { - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain, + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, rdd_idx, 0, dev->dbdc_support ? 2 : 0); if (err < 0) return err; } - return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain, - MT_RX_SEL0, 1); + return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, rdd_idx, 0, 1); } static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy) { - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; struct mt7915_dev *dev = phy->dev; - int err; + int err, rdd_idx; + + rdd_idx = mt7915_get_rdd_idx(phy, false); + if (rdd_idx < 0) + return -EINVAL; /* start CAC */ - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, - phy->mt76->band_idx, MT_RX_SEL0, 0); + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, rdd_idx, 0, 0); if (err < 0) return err; - err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx); + err = mt7915_dfs_start_rdd(dev, rdd_idx); if (err < 0) return err; - phy->rdd_state |= BIT(phy->mt76->band_idx); - - if (!is_mt7915(&dev->mt76)) - return 0; - - if (chandef->width == NL80211_CHAN_WIDTH_160 || - chandef->width == NL80211_CHAN_WIDTH_80P80) { - err = mt7915_dfs_start_rdd(dev, 1); - if (err < 0) - return err; - - phy->rdd_state |= BIT(1); - } - return 0; } @@ -2148,12 +2133,12 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; enum mt76_dfs_state dfs_state, prev_state; - int err; + int err, rdd_idx = mt7915_get_rdd_idx(phy, false); prev_state = phy->mt76->dfs_state; dfs_state = mt76_phy_dfs_state(phy->mt76); - if (prev_state == dfs_state) + if (prev_state == dfs_state || rdd_idx < 0) return 0; if (prev_state == MT_DFS_STATE_UNKNOWN) @@ -2177,8 +2162,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) if (dfs_state == MT_DFS_STATE_CAC) return 0; - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, - phy->mt76->band_idx, MT_RX_SEL0, 0); + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, rdd_idx, 0, 0); if (err < 0) { phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; return err; @@ -2188,15 +2172,13 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy) return 0; stop: - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, - phy->mt76->band_idx, MT_RX_SEL0, 0); + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, rdd_idx, 0, 0); if (err < 0) return err; if (is_mt7915(&dev->mt76)) { err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, - phy->mt76->band_idx, 0, - dev->dbdc_support ? 
2 : 0); + rdd_idx, 0, dev->dbdc_support ? 2 : 0); if (err < 0) return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 3643c72bb68d..427542777abc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -303,17 +303,35 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt7915_mcu_rdd_report *r; + u32 sku; r = (struct mt7915_mcu_rdd_report *)skb->data; - if (r->band_idx > MT_RX_SEL2) + switch (r->rdd_idx) { + case MT_RDD_IDX_BAND0: + break; + case MT_RDD_IDX_BAND1: + sku = mt7915_check_adie(dev, true); + /* the main phy is bound to band 1 for this sku */ + if (is_mt7986(&dev->mt76) && + (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE)) + break; + mphy = dev->mt76.phys[MT_BAND1]; + break; + case MT_RDD_IDX_BACKGROUND: + if (!dev->rdd2_phy) + return; + mphy = dev->rdd2_phy->mt76; + break; + default: + dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx); return; + } - if ((r->band_idx && !dev->phy.mt76->band_idx) && - dev->mt76.phys[MT_BAND1]) - mphy = dev->mt76.phys[MT_BAND1]; + if (!mphy) + return; - if (r->band_idx == MT_RX_SEL2) + if (r->rdd_idx == MT_RDD_IDX_BACKGROUND) cfg80211_background_radar_event(mphy->hw->wiphy, &dev->rdd2_chandef, GFP_ATOMIC); @@ -2697,11 +2715,14 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy, struct cfg80211_chan_def *chandef) { struct mt7915_dev *dev = phy->dev; - int err, region; + int err, region, rdd_idx; + + rdd_idx = mt7915_get_rdd_idx(phy, true); + if (rdd_idx < 0) + return -EINVAL; if (!chandef) { /* disable offchain */ - err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2, - 0, 0); + err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0); if (err) return err; @@ -2727,8 +2748,7 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy, break; } - return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2, - 0, region); + return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region); } int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) @@ -2859,7 +2879,7 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev) &req, sizeof(req), true); } -int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) +int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf) { struct mt7915_mcu_eeprom_info req = { .addr = cpu_to_le32(round_down(offset, @@ -2867,8 +2887,8 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) }; struct mt7915_mcu_eeprom_info *res; struct sk_buff *skb; + u8 *buf = read_buf; int ret; - u8 *buf; ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), @@ -2877,8 +2897,10 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) return ret; res = (struct mt7915_mcu_eeprom_info *)skb->data; - buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); + if (!buf) + buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE); + dev_kfree_skb(skb); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 092ed504a8f2..086ad89ecd91 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -57,7 +57,7 @@ struct mt7915_mcu_bcc_notify { struct mt7915_mcu_rdd_report { struct mt76_connac2_mcu_rxd_hdr rxd; - u8 band_idx; + u8 rdd_idx; u8 long_detected; u8 
constant_prf_detected; u8 staggered_prf_detected; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 876f0692850a..9c4d5cea0c42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -651,6 +651,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, wed->wlan.base = devm_ioremap(dev->mt76.dev, pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = pci_resource_start(pci_dev, 0); wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) + MT_INT_WED_SOURCE_CSR; @@ -678,6 +681,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr, wed->wlan.bus_type = MTK_WED_BUS_AXI; wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start, resource_size(res)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = res->start; wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR; wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 533939f2b7ed..2e94347c46d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -215,8 +215,6 @@ struct mt7915_phy { s16 coverage_class; u8 slottime; - u8 rdd_state; - u32 trb_ts; u32 rx_ampdu_ts; @@ -331,10 +329,10 @@ enum { __MT_WFDMA_MAX, }; -enum { - MT_RX_SEL0, - MT_RX_SEL1, - MT_RX_SEL2, /* monitor chain */ +enum rdd_idx { + MT_RDD_IDX_BAND0, /* RDD idx for band idx 0 (single-band) */ + MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */ + MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */ }; enum mt7915_rdd_cmd { @@ -354,6 +352,18 @@ enum mt7915_rdd_cmd { RDD_IRQ_OFF, }; +static inline int +mt7915_get_rdd_idx(struct mt7915_phy *phy, bool is_background) +{ + if (!phy->mt76->cap.has_5ghz) + return -1; + + if (is_background) + return MT_RDD_IDX_BACKGROUND; + + return phy->mt76->band_idx; +} + static inline struct mt7915_phy * mt7915_hw_phy(struct ieee80211_hw *hw) { @@ -425,6 +435,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 chain_idx); s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band); +bool mt7915_eeprom_has_background_radar(struct mt7915_dev *dev); int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); @@ -473,7 +484,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_sta *sta, void *data, u32 field); int mt7915_mcu_set_eeprom(struct mt7915_dev *dev); -int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset); +int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf); int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num); int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 826c48a2ee69..1fffa43379b2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -83,6 +83,11 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band, he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | 
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + + if (is_mt7922(phy->mt76->dev)) { + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + } break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[1] |= diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile index d321e4ed732f..ade5e647c941 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_MT7925E) += mt7925e.o obj-$(CONFIG_MT7925U) += mt7925u.o mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o +mt7925-common-$(CONFIG_NL80211_TESTMODE) += testmode.o mt7925e-y := pci.o pci_mac.o pci_mcu.o mt7925u-y := usb.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index 63cb08f4d87c..2a83ff59a968 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -89,7 +89,7 @@ void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2) } /* Check the last one */ - if (rule->flag && BIT(0)) + if (rule->flag & BIT(0)) break; pos += sizeof(*rule); @@ -322,6 +322,12 @@ static void mt7925_init_work(struct work_struct *work) return; } + ret = mt7925_mcu_set_thermal_protect(dev); + if (ret) { + dev_err(dev->mt76.dev, "thermal protection enable failed\n"); + return; + } + /* we support chip reset now */ dev->hw_init_done = true; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 66f327781947..94b0099dcd41 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -334,6 +334,9 @@ int __mt7925_start(struct mt792x_phy *phy) ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT792x_WATCHDOG_TIME); + if (phy->chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN) + wiphy_rfkill_start_polling(mphy->hw->wiphy); + return 0; } EXPORT_SYMBOL_GPL(__mt7925_start); @@ -1865,6 +1868,10 @@ mt7925_change_chanctx(struct ieee80211_hw *hw, link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76, link_conf, ctx); + + if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) + mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76, + link_conf, ctx); } } @@ -1954,8 +1961,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw, struct mt792x_phy *phy = mt792x_hw_phy(hw); struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_bss_conf *mconf; + struct ieee80211_bss_conf *link_conf; mconf = mt792x_vif_to_link(mvif, info->link_id); + link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); mt792x_mutex_acquire(dev); @@ -1997,6 +2006,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw, mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS; } + if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) + mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76, + link_conf, NULL); + mt792x_mutex_release(dev); } @@ -2195,6 +2208,18 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw, mutex_unlock(&dev->mt76.mutex); } +static void mt7925_rfkill_poll(struct ieee80211_hw *hw) +{ + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int ret; + + mt792x_mutex_acquire(phy->dev); + ret = mt7925_mcu_wf_rf_pin_ctrl(phy); + mt792x_mutex_release(phy->dev); + + wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0); +} + const struct ieee80211_ops mt7925_ops = { .tx = mt792x_tx, .start = 
mt7925_start, @@ -2234,6 +2259,8 @@ const struct ieee80211_ops mt7925_ops = { .sta_statistics = mt792x_sta_statistics, .sched_scan_start = mt7925_start_sched_scan, .sched_scan_stop = mt7925_stop_sched_scan, + CFG80211_TESTMODE_CMD(mt7925_testmode_cmd) + CFG80211_TESTMODE_DUMP(mt7925_testmode_dump) #ifdef CONFIG_PM .suspend = mt7925_suspend, .resume = mt7925_resume, @@ -2255,6 +2282,7 @@ const struct ieee80211_ops mt7925_ops = { .link_info_changed = mt7925_link_info_changed, .change_vif_links = mt7925_change_vif_links, .change_sta_links = mt7925_change_sta_links, + .rfkill_poll = mt7925_rfkill_poll, }; EXPORT_SYMBOL_GPL(mt7925_ops); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 14b1f603fb62..b8542be0d945 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -783,7 +783,7 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl) int ret; ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG), - &req, sizeof(req), false, NULL); + &req, sizeof(req), true, NULL); return ret; } @@ -974,6 +974,23 @@ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable) } EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep); +int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev) +{ + char cmd[64]; + int ret = 0; + + snprintf(cmd, sizeof(cmd), "ThermalProtGband %d %d %d %d %d %d %d %d %d %d", + 0, 100, 90, 80, 30, 1, 1, 115, 105, 5); + ret = mt7925_mcu_chip_config(dev, cmd); + + snprintf(cmd, sizeof(cmd), "ThermalProtAband %d %d %d %d %d %d %d %d %d %d", + 1, 100, 90, 80, 30, 1, 1, 115, 105, 5); + ret |= mt7925_mcu_chip_config(dev, cmd); + + return ret; +} +EXPORT_SYMBOL_GPL(mt7925_mcu_set_thermal_protect); + int mt7925_run_firmware(struct mt792x_dev *dev) { int err; @@ -1424,7 +1441,7 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) }; return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL), - &req, sizeof(req), false, NULL); + &req, sizeof(req), true, NULL); } EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); @@ -2046,8 +2063,6 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, }, }; - mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); } @@ -2298,6 +2313,40 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int return skb; } +static +void mt7925_mcu_bss_eht_tlv(struct sk_buff *skb, struct mt76_phy *phy, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; + + struct bss_eht_tlv *req; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_EHT, sizeof(*req)); + req = (struct bss_eht_tlv *)tlv; + req->is_eth_dscb_present = chandef->punctured ? 
1 : 0; + req->eht_dis_sub_chan_bitmap = cpu_to_le16(chandef->punctured); +} + +int mt7925_mcu_set_eht_pp(struct mt76_phy *phy, struct mt76_vif_link *mvif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct sk_buff *skb; + + skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif, + MT7925_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7925_mcu_bss_eht_tlv(skb, phy, link_conf, ctx); + + return mt76_mcu_skb_send_msg(phy->dev, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); +} + int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) @@ -2764,7 +2813,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable) conf->band = 0; /* unused */ err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS), - false); + true); return err; } @@ -2779,7 +2828,6 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct mt76_dev *mdev = phy->dev; struct mt76_connac_mcu_scan_channel *chan; struct sk_buff *skb; - struct scan_hdr_tlv *hdr; struct scan_req_tlv *req; struct scan_ssid_tlv *ssid; @@ -2790,9 +2838,12 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, struct tlv *tlv; int max_len; + if (test_bit(MT76_HW_SCANNING, &phy->state)) + return -EBUSY; + max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) + - sizeof(*bssid) + sizeof(*chan_info) + - sizeof(*misc) + sizeof(*ie); + sizeof(*bssid) * MT7925_RNR_SCAN_MAX_BSSIDS + + sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie); skb = mt76_mcu_msg_alloc(mdev, NULL, max_len); if (!skb) @@ -2815,6 +2866,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, for (i = 0; i < sreq->n_ssids; i++) { if (!sreq->ssids[i].ssid_len) continue; + if (i > MT7925_RNR_SCAN_MAX_BSSIDS) + break; ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, @@ -2824,10 +2877,31 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, ssid->ssid_type = n_ssids ? 
BIT(2) : BIT(0); ssid->ssids_num = n_ssids; - tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid)); - bssid = (struct scan_bssid_tlv *)tlv; + if (sreq->n_6ghz_params) { + u8 j; + + mt76_connac_mcu_build_rnr_scan_param(mdev, sreq); - memcpy(bssid->bssid, sreq->bssid, ETH_ALEN); + for (j = 0; j < mdev->rnr.bssid_num; j++) { + if (j > MT7925_RNR_SCAN_MAX_BSSIDS) + break; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, + sizeof(*bssid)); + bssid = (struct scan_bssid_tlv *)tlv; + + ether_addr_copy(bssid->bssid, mdev->rnr.bssid[j]); + bssid->match_ch = mdev->rnr.channel[j]; + bssid->match_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS; + bssid->match_short_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS; + } + req->scan_func |= SCAN_FUNC_RNR_SCAN; + } else { + tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid)); + bssid = (struct scan_bssid_tlv *)tlv; + + ether_addr_copy(bssid->bssid, sreq->bssid); + } tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info)); chan_info = (struct scan_chan_info_tlv *)tlv; @@ -2869,7 +2943,7 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, } err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); if (err < 0) clear_bit(MT76_HW_SCANNING, &phy->state); @@ -2975,7 +3049,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, } return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); } EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req); @@ -3011,7 +3085,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, clear_bit(MT76_HW_SCHED_SCANNING, &phy->state); return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ), - false); + true); } int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, @@ -3050,7 +3124,7 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy, } return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ), - &req, sizeof(req), false); + &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan); @@ -3155,7 +3229,7 @@ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy) memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req)); return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO), - false); + true); } EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain); @@ -3305,9 +3379,18 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, else uni_txd->option = MCU_CMD_UNI_EXT_ACK; - if (cmd == MCU_UNI_CMD(HIF_CTRL)) + if (cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(CHIP_CONFIG)) uni_txd->option &= ~MCU_CMD_ACK; + if (mcu_cmd == MCU_UNI_CMD_TESTMODE_CTRL || + mcu_cmd == MCU_UNI_CMD_TESTMODE_RX_STAT) { + if (cmd & __MCU_CMD_FIELD_QUERY) + uni_txd->option = 0x2; + else + uni_txd->option = 0x6; + } + goto exit; } @@ -3599,6 +3682,43 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy) return 0; } +int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy) +{ +#define UNI_CMD_RADIO_STATUS_GET 0 + struct mt792x_dev *dev = phy->dev; + struct sk_buff *skb; + int ret; + struct { + __le16 tag; + __le16 len; + u8 rsv[4]; + } __packed req = { + .tag = UNI_CMD_RADIO_STATUS_GET, + .len = cpu_to_le16(sizeof(req)), + }; + struct mt7925_radio_status_event { + __le16 tag; + __le16 len; + + u8 data; + u8 rsv[3]; + } __packed *status; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, + MCU_UNI_CMD(RADIO_STATUS), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + skb_pull(skb, sizeof(struct tlv)); + status = (struct mt7925_radio_status_event *)skb->data; + ret = status->data; + + dev_kfree_skb(skb); + + return ret; +} + 
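
The mt7925 hunks above add hardware rfkill support in three pieces: __mt7925_start() begins wiphy polling when the chip reports MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN, mt7925e_unregister_device() stops it on teardown, and the new .rfkill_poll callback queries the WF/RF pin state through MCU_UNI_CMD(RADIO_STATUS). Below is a condensed, illustrative sketch of the poll path only; the mt792x locking helpers are taken as given, the function name is marked as a sketch, and the meaning of the status byte is inferred from the ret == 0 check in the hunk rather than stated by the patch itself.

/* Sketch of the poll callback: ask the firmware for the WF/RF pin state and
 * report it to cfg80211. wiphy_rfkill_set_hw_state() takes a "blocked" flag,
 * so a zero status byte from the firmware is treated as radio hard-blocked.
 */
static void mt7925_rfkill_poll_sketch(struct ieee80211_hw *hw)
{
	struct mt792x_phy *phy = mt792x_hw_phy(hw);
	int ret;

	mt792x_mutex_acquire(phy->dev);
	ret = mt7925_mcu_wf_rf_pin_ctrl(phy);	/* status->data, or -errno on MCU failure */
	mt792x_mutex_release(phy->dev);

	wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0);
}

/* Polling is only armed on parts that expose the pin event:
 *   start: wiphy_rfkill_start_polling(hw->wiphy) in __mt7925_start()
 *   stop:  wiphy_rfkill_stop_polling(hw->wiphy) in mt7925e_unregister_device()
 * both gated on MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN.
 */

The poll takes the same mt792x mutex as the other callbacks in main.c, presumably so the RADIO_STATUS round trip cannot interleave with other MCU traffic from the suspend/resume or scan paths.
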
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, u8 bit_op, u32 bit_map) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 8ac43feb26d6..ee6fb16e83c5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -104,13 +104,6 @@ enum { MT7925_TM_WIFISPECTRUM, }; -struct mt7925_rftest_cmd { - u8 action; - u8 rsv[3]; - __le32 param0; - __le32 param1; -} __packed; - struct mt7925_rftest_evt { __le32 param0; __le32 param1; @@ -196,6 +189,7 @@ enum { UNI_SNIFFER_CONFIG, }; +#define MT7925_RNR_SCAN_MAX_BSSIDS 10 struct scan_hdr_tlv { /* fixed field */ u8 seq_num; @@ -223,7 +217,7 @@ struct scan_req_tlv { __le16 timeout_value; __le16 probe_delay_time; __le32 func_mask_ext; -}; +} __packed; struct scan_ssid_tlv { __le16 tag; @@ -235,9 +229,10 @@ struct scan_ssid_tlv { * BIT(2) + ssid_type_ext BIT(0) specified SSID only */ u8 ssids_num; - u8 pad[2]; - struct mt76_connac_mcu_scan_ssid ssids[4]; -}; + u8 is_short_ssid; + u8 pad; + struct mt76_connac_mcu_scan_ssid ssids[MT7925_RNR_SCAN_MAX_BSSIDS]; +} __packed; struct scan_bssid_tlv { __le16 tag; @@ -247,8 +242,9 @@ struct scan_bssid_tlv { u8 match_ch; u8 match_ssid_ind; u8 rcpi; - u8 pad[3]; -}; + u8 match_short_ssid_ind; + u8 pad[2]; +} __packed; struct scan_chan_info_tlv { __le16 tag; @@ -264,7 +260,7 @@ struct scan_chan_info_tlv { u8 channels_num; /* valid when channel_type is 4 */ u8 pad[2]; struct mt76_connac_mcu_scan_channel channels[64]; -}; +} __packed; struct scan_ie_tlv { __le16 tag; @@ -372,6 +368,19 @@ struct bss_mld_tlv { u8 __rsv[3]; } __packed; +struct bss_eht_tlv { + __le16 tag; + __le16 len; + u8 is_eht_op_present; + u8 is_eth_dscb_present; + u8 eht_ctrl; + u8 eht_ccfs0; + u8 eht_ccfs1; + u8 pad1; + __le16 eht_dis_sub_chan_bitmap; + u8 pad2[4]; +} __packed; + struct sta_rec_ba_uni { __le16 tag; __le16 len; @@ -589,6 +598,47 @@ struct roc_acquire_tlv { u8 rsv[3]; } __packed; +enum ENUM_CMD_TEST_CTRL_ACT { + CMD_TEST_CTRL_ACT_SWITCH_MODE = 0, + CMD_TEST_CTRL_ACT_SET_AT = 1, + CMD_TEST_CTRL_ACT_GET_AT = 2, + CMD_TEST_CTRL_ACT_SET_AT_ENG = 3, + CMD_TEST_CTRL_ACT_GET_AT_ENG = 4, + CMD_TEST_CTRL_ACT_NUM +}; + +enum ENUM_CMD_TEST_CTRL_ACT_SWITCH_MODE_OP { + CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL = 0, + CMD_TEST_CTRL_ACT_SWITCH_MODE_RF_TEST = 1, + CMD_TEST_CTRL_ACT_SWITCH_MODE_ICAP = 2, + CMD_TEST_CTRL_ACT_SWITCH_MODE_NUM +}; + +union testmode_data { + __le32 op_mode; + __le32 channel_freq; + u8 rf_at_info[84]; +}; + +union testmode_evt { + __le32 op_mode; + __le32 channel_freq; + u8 rf_at_info[1024]; +}; + +struct uni_cmd_testmode_ctrl { + u16 tag; + u16 length; + u8 action; + u8 reserved[3]; + union testmode_data data; +} __packed; + +struct mt7925_rftest_cmd { + u8 padding[4]; + struct uni_cmd_testmode_ctrl ctrl; +} __packed; + static inline enum connac3_mcu_cipher_type mt7925_mcu_get_cipher(int cipher) { @@ -637,11 +687,15 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, int mt7925_mcu_set_timing(struct mt792x_phy *phy, struct ieee80211_bss_conf *link_conf); int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); +int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev); int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx); +int mt7925_mcu_set_eht_pp(struct mt76_phy 
*phy, struct mt76_vif_link *mvif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx); int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy); int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, struct ieee80211_bss_conf *link_conf); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 4e50f2597ccd..1b165d0d8bd3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -365,5 +365,11 @@ int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, int link_id); +int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy); + +int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); +int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, + struct netlink_callback *cb, void *data, int len); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index c7b5dc1dbb34..89dc30f7c6b7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -31,6 +31,10 @@ static void mt7925e_unregister_device(struct mt792x_dev *dev) { int i; struct mt76_connac_pm *pm = &dev->pm; + struct ieee80211_hw *hw = mt76_hw(dev); + + if (dev->phy.chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN) + wiphy_rfkill_stop_polling(hw->wiphy); cancel_work_sync(&dev->init_work); mt76_unregister_device(&dev->mt76); @@ -490,9 +494,6 @@ static int mt7925_pci_suspend(struct device *device) /* disable interrupt */ mt76_wr(dev, dev->irq_map->host_irq_enable, 0); - mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS, - dev->irq_map->tx.all_complete_mask | - MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h index 985794a40c1a..547489092c29 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -28,7 +28,7 @@ #define MT_MDP_TO_HIF 0 #define MT_MDP_TO_WM 1 -#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228) +#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204) #define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c) #define HOST_RX_DONE_INT_ENA4 BIT(12) #define HOST_RX_DONE_INT_ENA5 BIT(13) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c new file mode 100644 index 000000000000..a3c97164ba21 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: ISC + +#include "mt7925.h" +#include "mcu.h" + +#define MT7925_EVT_RSP_LEN 512 + +enum mt7925_testmode_attr { + MT7925_TM_ATTR_UNSPEC, + MT7925_TM_ATTR_SET, + MT7925_TM_ATTR_QUERY, + MT7925_TM_ATTR_RSP, + + /* keep last */ + NUM_MT7925_TM_ATTRS, + MT7925_TM_ATTR_MAX = NUM_MT7925_TM_ATTRS - 1, +}; + +struct mt7925_tm_cmd { + u8 padding[4]; + struct uni_cmd_testmode_ctrl c; +} __packed; + +struct mt7925_tm_evt { + u32 param0; + u32 param1; +} __packed; + +static const struct nla_policy mt7925_tm_policy[NUM_MT7925_TM_ATTRS] = { + [MT7925_TM_ATTR_SET] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)), + [MT7925_TM_ATTR_QUERY] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)), +}; + +static int +mt7925_tm_set(struct mt792x_dev *dev, struct mt7925_tm_cmd *req) +{ + struct mt7925_rftest_cmd cmd; + struct 
mt7925_rftest_cmd *pcmd = &cmd; + bool testmode = false, normal = false; + struct mt76_connac_pm *pm = &dev->pm; + struct mt76_phy *phy = &dev->mphy; + int ret = -ENOTCONN; + + memset(pcmd, 0, sizeof(*pcmd)); + memcpy(pcmd, req, sizeof(struct mt7925_tm_cmd)); + + mutex_lock(&dev->mt76.mutex); + + if (pcmd->ctrl.action == CMD_TEST_CTRL_ACT_SWITCH_MODE) { + if (pcmd->ctrl.data.op_mode == CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL) + normal = true; + else + testmode = true; + } + + if (testmode) { + /* Make sure testmode running on full power mode */ + pm->enable = false; + cancel_delayed_work_sync(&pm->ps_work); + cancel_work_sync(&pm->wake_work); + __mt792x_mcu_drv_pmctrl(dev); + + phy->test.state = MT76_TM_STATE_ON; + } + + if (!mt76_testmode_enabled(phy)) + goto out; + + ret = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(TESTMODE_CTRL), &cmd, + sizeof(cmd), false); + + if (ret) + goto out; + + if (normal) { + /* Switch back to the normal world */ + phy->test.state = MT76_TM_STATE_OFF; + pm->enable = true; + } +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static int +mt7925_tm_query(struct mt792x_dev *dev, struct mt7925_tm_cmd *req, + char *evt_resp) +{ + struct mt7925_rftest_cmd cmd; + char *pcmd = (char *)&cmd; + struct sk_buff *skb = NULL; + int ret = 1; + + memset(pcmd, 0, sizeof(*pcmd)); + memcpy(pcmd + 4, (char *)&req->c, sizeof(struct uni_cmd_testmode_ctrl)); + + if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_CTRL) + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_CTRL), + &cmd, sizeof(cmd), true, &skb); + else if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_RX_STAT) + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_RX_STAT), + &cmd, sizeof(cmd), true, &skb); + + if (ret) + goto out; + + memcpy((char *)evt_resp, (char *)skb->data + 8, MT7925_EVT_RSP_LEN); + +out: + dev_kfree_skb(skb); + + return ret; +} + +int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct nlattr *tb[NUM_MT76_TM_ATTRS]; + struct mt76_phy *mphy = hw->priv; + struct mt792x_phy *phy = mphy->priv; + int err; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || + !(hw->conf.flags & IEEE80211_CONF_MONITOR)) + return -ENOTCONN; + + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + + if (tb[MT76_TM_ATTR_DRV_DATA]) { + struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data; + int ret; + + data = tb[MT76_TM_ATTR_DRV_DATA]; + ret = nla_parse_nested_deprecated(drv_tb, + MT7925_TM_ATTR_MAX, + data, mt7925_tm_policy, + NULL); + if (ret) + return ret; + + data = drv_tb[MT7925_TM_ATTR_SET]; + if (data) + return mt7925_tm_set(phy->dev, nla_data(data)); + } + + return -EINVAL; +} + +int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, + struct netlink_callback *cb, void *data, int len) +{ + struct nlattr *tb[NUM_MT76_TM_ATTRS]; + struct mt76_phy *mphy = hw->priv; + struct mt792x_phy *phy = mphy->priv; + int err; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state) || + !(hw->conf.flags & IEEE80211_CONF_MONITOR) || + !mt76_testmode_enabled(mphy)) + return -ENOTCONN; + + if (cb->args[2]++ > 0) + return -ENOENT; + + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + + if (tb[MT76_TM_ATTR_DRV_DATA]) { + struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data; + int ret; + + data = tb[MT76_TM_ATTR_DRV_DATA]; + ret = nla_parse_nested_deprecated(drv_tb, + MT7925_TM_ATTR_MAX, + data, 
mt7925_tm_policy, + NULL); + if (ret) + return ret; + + data = drv_tb[MT7925_TM_ATTR_QUERY]; + if (data) { + char evt_resp[MT7925_EVT_RSP_LEN]; + + err = mt7925_tm_query(phy->dev, nla_data(data), + evt_resp); + if (err) + return err; + + return nla_put(msg, MT7925_TM_ATTR_RSP, + sizeof(evt_resp), evt_resp); + } + } + + return -EINVAL; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c index ccab0d7b9be4..303d6e80a666 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c @@ -48,8 +48,8 @@ const struct mt7996_mem_region* mt7996_coredump_get_mem_layout(struct mt7996_dev *dev, u32 *num) { switch (mt76_chip(&dev->mt76)) { - case 0x7990: - case 0x7991: + case MT7996_DEVICE_ID: + case MT7996_DEVICE_ID_2: *num = ARRAY_SIZE(mt7996_mem_regions); return &mt7996_mem_regions[0]; default: diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c index 4a28db17a287..0ab827f52fd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c @@ -222,18 +222,27 @@ static const struct file_operations mt7996_sys_recovery_ops = { static int mt7996_radar_trigger(void *data, u64 val) { +#define RADAR_MAIN_CHAIN 1 +#define RADAR_BACKGROUND 2 struct mt7996_dev *dev = data; + struct mt7996_phy *phy = mt7996_band_phy(dev, NL80211_BAND_5GHZ); + int rdd_idx; - if (val > MT_RX_SEL2) + if (!phy || !val || val > RADAR_BACKGROUND) return -EINVAL; - if (val == MT_RX_SEL2 && !dev->rdd2_phy) { + if (val == RADAR_BACKGROUND && !dev->rdd2_phy) { dev_err(dev->mt76.dev, "Background radar is not enabled\n"); return -EINVAL; } - return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, - val, 0, 0); + rdd_idx = mt7996_get_rdd_idx(phy, val == RADAR_BACKGROUND); + if (rdd_idx < 0) { + dev_err(dev->mt76.dev, "No RDD found\n"); + return -EINVAL; + } + + return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, rdd_idx, 0); } DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c index 69a7d9b2e38b..c8bef0b2a144 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c @@ -55,20 +55,32 @@ static void mt7996_dma_config(struct mt7996_dev *dev) /* rx queue */ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM); + /* for mt7990, RX ring 1 is for SDO instead */ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA); - - /* mt7996: band0 and band1, mt7992: band0 */ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0); - RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN); + if (mt7996_has_wa(dev)) + RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, + MT7996_RXQ_MCU_WA_MAIN); - if (is_mt7996(&dev->mt76)) { + switch (mt76_chip(&dev->mt76)) { + case MT7992_DEVICE_ID: + RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT); + RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1); + break; + case MT7990_DEVICE_ID: + RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1); + RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, + MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0); + if (dev->hif2) + RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0, + MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1); + break; 
+ case MT7996_DEVICE_ID: + default: /* mt7996 band2 */ - RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2); RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI); - } else { - /* mt7992 band1 */ - RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1); - RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT); + RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2); + break; } if (dev->has_rro) { @@ -104,9 +116,11 @@ static void mt7996_dma_config(struct mt7996_dev *dev) } /* mcu tx queue */ - MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM); - MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA); MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL); + MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM); + if (mt7996_has_wa(dev)) + MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, + MT7996_TXQ_MCU_WA); } static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth) @@ -121,43 +135,62 @@ static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth) static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs) { u16 base = 0; - u8 queue; + u8 queue, val; #define PREFETCH(_depth) (__mt7996_dma_prefetch_base(&base, (_depth))) /* prefetch SRAM wrapping boundary for tx/rx ring. */ - mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2)); - mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2)); + /* Tx Command Rings */ + val = is_mt7996(&dev->mt76) ? 2 : 4; + mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val)); + mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val)); + if (mt7996_has_wa(dev)) + mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val)); + + /* Tx Data Rings */ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8)); - mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8)); - mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2)); - mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2)); - - queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA; - mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2)); - - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10)); + if (!is_mt7996(&dev->mt76) || dev->hif2) + mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8)); + if (is_mt7996(&dev->mt76)) + mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8)); + + /* Rx Event Rings */ + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val)); + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val)); + + /* Rx TxFreeDone From WA Rings */ + if (mt7996_has_wa(dev)) { + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val)); + queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA; + mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val)); + } + /* Rx TxFreeDone From MAC Rings */ + val = is_mt7996(&dev->mt76) ? 
4 : 8; + if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro)) + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val)); + if (is_mt7990(&dev->mt76) && dev->hif2) + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val)); + else if (is_mt7996(&dev->mt76) && dev->has_rro) + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val)); + + /* Rx Data Rings */ + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10)); queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1; - mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10)); + mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10)); + /* Rx RRO Rings */ if (dev->has_rro) { - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs, - PREFETCH(0x10)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs, - PREFETCH(0x10)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, - PREFETCH(0x4)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs, - PREFETCH(0x4)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs, - PREFETCH(0x4)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, - PREFETCH(0x4)); - mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, - PREFETCH(0x4)); + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10)); + queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1; + mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10)); + + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val)); + if (is_mt7996(&dev->mt76)) { + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs, + PREFETCH(val)); + mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs, + PREFETCH(val)); + } } #undef PREFETCH @@ -269,6 +302,9 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset) mtk_wed_device_start(wed, wed_irq_mask); } + if (!mt7996_has_wa(dev)) + irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) | + MT_INT_RX(MT_RXQ_BAND1_WA)); irq_mask = reset ? 
MT_INT_MCU_CMD : irq_mask; mt7996_irq_enable(dev, irq_mask); @@ -474,12 +510,14 @@ int mt7996_dma_init(struct mt7996_dev *dev) return ret; /* command to WA */ - ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, - MT_MCUQ_ID(MT_MCUQ_WA), - MT7996_TX_MCU_RING_SIZE, - MT_MCUQ_RING_BASE(MT_MCUQ_WA)); - if (ret) - return ret; + if (mt7996_has_wa(dev)) { + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, + MT_MCUQ_ID(MT_MCUQ_WA), + MT7996_TX_MCU_RING_SIZE, + MT_MCUQ_RING_BASE(MT_MCUQ_WA)); + if (ret) + return ret; + } /* firmware download */ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, @@ -493,16 +531,16 @@ int mt7996_dma_init(struct mt7996_dev *dev) ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], MT_RXQ_ID(MT_RXQ_MCU), MT7996_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, + MT7996_RX_MCU_BUF_SIZE, MT_RXQ_RING_BASE(MT_RXQ_MCU)); if (ret) return ret; - /* event from WA */ + /* event from WA, or SDO event for mt7990 */ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], MT_RXQ_ID(MT_RXQ_MCU_WA), MT7996_RX_MCU_RING_SIZE_WA, - MT_RX_BUF_SIZE, + MT7996_RX_MCU_BUF_SIZE, MT_RXQ_RING_BASE(MT_RXQ_MCU_WA)); if (ret) return ret; @@ -527,13 +565,41 @@ int mt7996_dma_init(struct mt7996_dev *dev) dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed; } - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA], - MT_RXQ_ID(MT_RXQ_MAIN_WA), - MT7996_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, - MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA)); - if (ret) - return ret; + if (mt7996_has_wa(dev)) { + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA], + MT_RXQ_ID(MT_RXQ_MAIN_WA), + MT7996_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, + MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA)); + if (ret) + return ret; + } else { + if (mtk_wed_device_active(wed)) { + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE; + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed; + } + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0], + MT_RXQ_ID(MT_RXQ_TXFREE_BAND0), + MT7996_RX_MCU_RING_SIZE, + MT7996_RX_BUF_SIZE, + MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0)); + if (ret) + return ret; + } + + if (!mt7996_has_wa(dev) && dev->hif2) { + if (mtk_wed_device_active(wed)) { + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE; + dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed; + } + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1], + MT_RXQ_ID(MT_RXQ_TXFREE_BAND1), + MT7996_RX_MCU_RING_SIZE, + MT7996_RX_BUF_SIZE, + MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1)); + if (ret) + return ret; + } if (mt7996_band_valid(dev, MT_BAND2)) { /* rx data queue for mt7996 band2 */ @@ -573,14 +639,16 @@ int mt7996_dma_init(struct mt7996_dev *dev) return ret; /* tx free notify event from WA for mt7992 band1 */ - rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs; - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA], - MT_RXQ_ID(MT_RXQ_BAND1_WA), - MT7996_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, - rx_base); - if (ret) - return ret; + if (mt7996_has_wa(dev)) { + rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs; + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA], + MT_RXQ_ID(MT_RXQ_BAND1_WA), + MT7996_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, + rx_base); + if (ret) + return ret; + } } if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) && diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c index 53dfac02f8af..87c6192b6384 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c @@ -13,10 +13,12 @@ static 
int mt7996_check_eeprom(struct mt7996_dev *dev) u16 val = get_unaligned_le16(eeprom); switch (val) { - case 0x7990: + case MT7996_DEVICE_ID: return is_mt7996(&dev->mt76) ? 0 : -EINVAL; - case 0x7992: + case MT7992_DEVICE_ID: return is_mt7992(&dev->mt76) ? 0 : -EINVAL; + case MT7990_DEVICE_ID: + return is_mt7990(&dev->mt76) ? 0 : -EINVAL; default: return -EINVAL; } @@ -25,7 +27,7 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev) static char *mt7996_eeprom_name(struct mt7996_dev *dev) { switch (mt76_chip(&dev->mt76)) { - case 0x7992: + case MT7992_DEVICE_ID: switch (dev->var.type) { case MT7992_VAR_TYPE_23: if (dev->var.fem == MT7996_FEM_INT) @@ -39,7 +41,11 @@ static char *mt7996_eeprom_name(struct mt7996_dev *dev) return MT7992_EEPROM_DEFAULT_MIX; return MT7992_EEPROM_DEFAULT; } - case 0x7990: + case MT7990_DEVICE_ID: + if (dev->var.fem == MT7996_FEM_INT) + return MT7990_EEPROM_DEFAULT_INT; + return MT7990_EEPROM_DEFAULT; + case MT7996_DEVICE_ID: default: switch (dev->var.type) { case MT7996_VAR_TYPE_233: @@ -304,6 +310,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy) phy->has_aux_rx = true; mphy->antenna_mask = BIT(nss) - 1; + phy->orig_antenna_mask = mphy->antenna_mask; mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx]; phy->orig_chainmask = mphy->chainmask; dev->chainmask |= mphy->chainmask; @@ -370,3 +377,30 @@ s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band) return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta; } + +bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev) +{ + switch (mt76_chip(&dev->mt76)) { + case MT7996_DEVICE_ID: + if (dev->var.type == MT7996_VAR_TYPE_233) + return false; + break; + case MT7992_DEVICE_ID: + if (dev->var.type == MT7992_VAR_TYPE_23) + return false; + break; + case MT7990_DEVICE_ID: { + u8 path, rx_path, nss, *eeprom = dev->mt76.eeprom.data; + + mt7996_eeprom_parse_stream(eeprom, MT_BAND1, &path, &rx_path, &nss); + /* Disable background radar capability in 3T3R */ + if (path == 3 || rx_path == 3) + return false; + break; + } + default: + return false; + } + + return true; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 6b660424aedc..a9599c286328 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -217,6 +217,9 @@ static int mt7996_thermal_init(struct mt7996_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s.%d", wiphy_name(wiphy), phy->mt76->band_idx); + if (!name) + return -ENOMEM; + snprintf(cname, sizeof(cname), "cooling_device%d", phy->mt76->band_idx); cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops); @@ -314,8 +317,8 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy, struct ieee80211_supported_band *sband) { struct mt7996_dev *dev = phy->dev; - int i, nss = hweight16(phy->mt76->chainmask); - int nss_delta = mt76_tx_power_nss_delta(nss); + int i, n_chains = hweight16(phy->mt76->chainmask); + int path_delta = mt76_tx_power_path_delta(n_chains); int pwr_delta = mt7996_eeprom_get_power_delta(dev, sband->band); struct mt76_power_limits limits; @@ -327,7 +330,7 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy, target_power = mt76_get_rate_power_limits(phy->mt76, chan, &limits, target_power); - target_power += nss_delta; + target_power += path_delta; target_power = DIV_ROUND_UP(target_power, 2); chan->max_power = min_t(int, chan->max_reg_power, target_power); @@ -440,6 +443,9 
@@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed) hw->queues = 4; hw->max_rx_aggregation_subframes = max_subframes; hw->max_tx_aggregation_subframes = max_subframes; + if (is_mt7990(mdev) && dev->has_eht) + hw->max_tx_aggregation_subframes = 512; + hw->netdev_features = NETIF_F_RXCSUM; if (mtk_wed_device_active(wed)) hw->netdev_features |= NETIF_F_HW_TC; @@ -472,7 +478,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); - if (mt7996_has_background_radar(dev) && + if (mt7996_eeprom_has_background_radar(dev) && (!mdev->dev->of_node || !of_property_read_bool(mdev->dev->of_node, "mediatek,disable-radar-background"))) @@ -929,13 +935,13 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev) u8 var_type; switch (mt76_chip(&dev->mt76)) { - case 0x7990: + case MT7996_DEVICE_ID: if (val & MT_PAD_GPIO_2ADIE_TBTC) var_type = MT7996_VAR_TYPE_233; else var_type = MT7996_VAR_TYPE_444; break; - case 0x7992: + case MT7992_DEVICE_ID: if (val & MT_PAD_GPIO_ADIE_SINGLE) var_type = MT7992_VAR_TYPE_23; else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992)) @@ -943,6 +949,9 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev) else return -EINVAL; break; + case MT7990_DEVICE_ID: + var_type = MT7990_VAR_TYPE_23; + break; default: return -EINVAL; } @@ -1061,10 +1070,10 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy) *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; - if (is_mt7996(phy->mt76->dev)) - *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3); - else + if (is_mt7992(phy->mt76->dev)) *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4); + else + *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3); *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | @@ -1107,18 +1116,17 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; elem->phy_cap_info[2] |= c; c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE; - if (is_mt7996(phy->mt76->dev)) - c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | - (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g); - else + if (is_mt7992(phy->mt76->dev)) c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 | (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 * non_2g); + else + c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 | + (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g); elem->phy_cap_info[4] |= c; @@ -1318,6 +1326,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); + eht_cap_elem->mac_cap_info[1] |= + IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK; + eht_cap_elem->phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index d89c06f47997..0dbd4662bc84 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -647,6 
+647,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; } + /* IEEE 802.11 fragmentation can only be applied to unicast frames. + * Hence, drop fragments with multicast/broadcast RA. + * This check fixes vulnerabilities, like CVE-2020-26145. + */ + if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) && + FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M) + return -EINVAL; + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; if (hdr_trans && ieee80211_has_morefrags(fc)) { if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap)) @@ -789,10 +797,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi, if (ieee80211_is_action(fc) && mgmt->u.action.category == WLAN_CATEGORY_BACK && - mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) + mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { + if (is_mt7990(&dev->mt76)) + txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid)); tid = MT_TX_ADDBA; - else if (ieee80211_is_mgmt(hdr->frame_control)) + } else if (ieee80211_is_mgmt(hdr->frame_control)) { tid = MT_TX_NORMAL; + } val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | FIELD_PREP(MT_TXD1_HDR_INFO, @@ -987,12 +998,32 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, } } +static bool +mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + if (ieee80211_is_mgmt(hdr->frame_control)) + return true; + + /* for SDO to bypass specific data frame */ + if (!mt7996_has_wa(dev)) { + if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) + return true; + + if (ieee80211_has_a4(hdr->frame_control) && + !ieee80211_is_data_present(hdr->frame_control)) + return true; + } + + return false; +} + int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, enum mt76_txq_id qid, struct mt76_wcid *wcid, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); struct ieee80211_key_conf *key = info->control.hw_key; @@ -1017,8 +1048,11 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return id; pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, - pid, qid, 0); + memset(txwi_ptr, 0, MT_TXD_SIZE); + /* Transmit non qos data by 802.11 header and need to fill txd by host*/ + if (!is_8023 || pid >= MT_PACKET_ID_FIRST) + mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, + pid, qid, 0); txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE); for (i = 0; i < nbuf; i++) { @@ -1035,13 +1069,15 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } txp->fw.nbuf = nbuf; - txp->fw.flags = - cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD); + txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST); + + if (!is_8023 || pid >= MT_PACKET_ID_FIRST) + txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD); if (!key) txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); - if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control)) + if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb)) txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); if (vif) { @@ -1179,6 +1215,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) void *end = data + len; 
bool wake = false; u16 total, count = 0; + u8 ver; /* clean DMA queues and unmap buffers first */ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); @@ -1192,7 +1229,8 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false); } - if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5)) + ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER); + if (WARN_ON_ONCE(ver < 5)) return; total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT); @@ -1214,11 +1252,16 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len) wcid = rcu_dereference(dev->mt76.wcid[idx]); sta = wcid_to_sta(wcid); if (!sta) - continue; + goto next; msta_link = container_of(wcid, struct mt7996_sta_link, wcid); mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid); +next: + /* ver 7 has a new DW with pair = 1, skip it */ + if (ver == 7 && ((void *)(cur_info + 1) < end) && + (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR)) + cur_info++; continue; } else if (info & MT_TXFREE_INFO_HEADER) { u32 tx_retries = 0, tx_failed = 0; @@ -2411,16 +2454,15 @@ void mt7996_mac_work(struct work_struct *work) static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy) { struct mt7996_dev *dev = phy->dev; + int rdd_idx = mt7996_get_rdd_idx(phy, false); + + if (rdd_idx < 0) + return; - if (phy->rdd_state & BIT(0)) - mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0, - MT_RX_SEL0, 0); - if (phy->rdd_state & BIT(1)) - mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1, - MT_RX_SEL0, 0); + mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0); } -static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain) +static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx) { int err, region; @@ -2437,44 +2479,30 @@ static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain) break; } - err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain, - MT_RX_SEL0, region); + err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region); if (err < 0) return err; - return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, - MT_RX_SEL0, 1); + return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1); } static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy) { - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; struct mt7996_dev *dev = phy->dev; - u8 band_idx = phy->mt76->band_idx; - int err; + int err, rdd_idx; - /* start CAC */ - err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx, - MT_RX_SEL0, 0); - if (err < 0) - return err; + rdd_idx = mt7996_get_rdd_idx(phy, false); + if (rdd_idx < 0) + return -EINVAL; - err = mt7996_dfs_start_rdd(dev, band_idx); + /* start CAC */ + err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0); if (err < 0) return err; - phy->rdd_state |= BIT(band_idx); - - if (chandef->width == NL80211_CHAN_WIDTH_160 || - chandef->width == NL80211_CHAN_WIDTH_80P80) { - err = mt7996_dfs_start_rdd(dev, 1); - if (err < 0) - return err; - - phy->rdd_state |= BIT(1); - } + err = mt7996_dfs_start_rdd(dev, rdd_idx); - return 0; + return err; } static int @@ -2515,12 +2543,12 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy) { struct mt7996_dev *dev = phy->dev; enum mt76_dfs_state dfs_state, prev_state; - int err; + int err, rdd_idx = mt7996_get_rdd_idx(phy, false); prev_state = phy->mt76->dfs_state; dfs_state = mt76_phy_dfs_state(phy->mt76); - if (prev_state == dfs_state) + if (prev_state == dfs_state || rdd_idx < 0) return 0; if (prev_state == MT_DFS_STATE_UNKNOWN) @@ -2544,8 +2572,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy) if (dfs_state == 
MT_DFS_STATE_CAC) return 0; - err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, - phy->mt76->band_idx, MT_RX_SEL0, 0); + err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0); if (err < 0) { phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN; return err; @@ -2555,8 +2582,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy) return 0; stop: - err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, - phy->mt76->band_idx, MT_RX_SEL0, 0); + err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0); if (err < 0) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index 91c64e3a0860..78ae9f5cb176 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -68,11 +68,13 @@ static int mt7996_start(struct ieee80211_hw *hw) static void mt7996_stop_phy(struct mt7996_phy *phy) { - struct mt7996_dev *dev = phy->dev; + struct mt7996_dev *dev; if (!phy || !test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) return; + dev = phy->dev; + cancel_delayed_work_sync(&phy->mt76->mac_work); mutex_lock(&dev->mt76.mutex); @@ -414,11 +416,13 @@ static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy) static void mt7996_set_monitor(struct mt7996_phy *phy, bool enabled) { - struct mt7996_dev *dev = phy->dev; + struct mt7996_dev *dev; if (!phy) return; + dev = phy->dev; + if (enabled == !(phy->rxfilter & MT_WF_RFCR_DROP_OTHER_UC)) return; @@ -684,7 +688,7 @@ mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } n_chains = hweight16(phy->mt76->chainmask); - delta = mt76_tx_power_nss_delta(n_chains); + delta = mt76_tx_power_path_delta(n_chains); *dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2); return 0; @@ -987,7 +991,7 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif, { struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; unsigned int link_id; - int err; + int err = 0; for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_bss_conf *link_conf; @@ -998,16 +1002,22 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif, continue; link_conf = link_conf_dereference_protected(vif, link_id); - if (!link_conf) + if (!link_conf) { + err = -EINVAL; goto error_unlink; + } link = mt7996_vif_link(dev, vif, link_id); - if (!link) + if (!link) { + err = -EINVAL; goto error_unlink; + } link_sta = link_sta_dereference_protected(sta, link_id); - if (!link_sta) + if (!link_sta) { + err = -EINVAL; goto error_unlink; + } err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link, link_id); @@ -1244,7 +1254,7 @@ unlock: static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { struct mt7996_dev *dev = mt7996_hw_dev(hw); - int i, ret; + int i, ret = 0; mutex_lock(&dev->mt76.mutex); @@ -1518,7 +1528,8 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) u8 shift = dev->chainshift[band_idx]; phy->mt76->chainmask = tx_ant & phy->orig_chainmask; - phy->mt76->antenna_mask = phy->mt76->chainmask >> shift; + phy->mt76->antenna_mask = (phy->mt76->chainmask >> shift) & + phy->orig_antenna_mask; mt76_set_stream_caps(phy->mt76, true); mt7996_set_stream_vht_txbf_caps(phy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index ddd555942c73..f0adc0b4b8b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -13,7 +13,7 @@ #define fw_name(_dev, name, ...) 
({ \ char *_fw; \ switch (mt76_chip(&(_dev)->mt76)) { \ - case 0x7992: \ + case MT7992_DEVICE_ID: \ switch ((_dev)->var.type) { \ case MT7992_VAR_TYPE_23: \ _fw = MT7992_##name##_23; \ @@ -22,7 +22,10 @@ _fw = MT7992_##name; \ } \ break; \ - case 0x7990: \ + case MT7990_DEVICE_ID: \ + _fw = MT7990_##name; \ + break; \ + case MT7996_DEVICE_ID: \ default: \ switch ((_dev)->var.type) { \ case MT7996_VAR_TYPE_233: \ @@ -265,7 +268,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd); txd = (__le32 *)skb_push(skb, txd_len); - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state) && mt7996_has_wa(dev)) qid = MT_MCUQ_WA; else qid = MT_MCUQ_WM; @@ -335,8 +338,12 @@ exit: int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) { struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; __le32 args[3]; - } req = { + } __packed req = { .args = { cpu_to_le32(a1), cpu_to_le32(a2), @@ -344,7 +351,16 @@ int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) }, }; - return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false); + if (mt7996_has_wa(dev)) + return mt76_mcu_send_msg(&dev->mt76, cmd, &req.args, + sizeof(req.args), false); + + req.tag = cpu_to_le16(cmd == MCU_WA_PARAM_CMD(QUERY) ? UNI_CMD_SDO_QUERY : + UNI_CMD_SDO_SET); + req.len = cpu_to_le16(sizeof(req) - 4); + + return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO), &req, + sizeof(req), false); } static void @@ -364,21 +380,27 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb) r = (struct mt7996_mcu_rdd_report *)skb->data; - if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys)) - return; - - if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy) - return; - - if (r->band_idx == MT_RX_SEL2) + switch (r->rdd_idx) { + case MT_RDD_IDX_BAND2: + mphy = dev->mt76.phys[MT_BAND2]; + break; + case MT_RDD_IDX_BAND1: + mphy = dev->mt76.phys[MT_BAND1]; + break; + case MT_RDD_IDX_BACKGROUND: + if (!dev->rdd2_phy) + return; mphy = dev->rdd2_phy->mt76; - else - mphy = dev->mt76.phys[r->band_idx]; + break; + default: + dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx); + return; + } if (!mphy) return; - if (r->band_idx == MT_RX_SEL2) + if (r->rdd_idx == MT_RDD_IDX_BACKGROUND) cfg80211_background_radar_event(mphy->hw->wiphy, &dev->rdd2_chandef, GFP_ATOMIC); @@ -2242,9 +2264,6 @@ mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb, if (!link) continue; - if (!msta_link) - continue; - mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx); mld_setup_link->bss_idx = link->mt76.idx; mld_setup_link++; @@ -3011,6 +3030,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev) if (ret) return ret; + if (!mt7996_has_wa(dev)) + return 0; + ret = __mt7996_load_ram(dev, "DSP", fw_name(dev, FIRMWARE_DSP), MT7996_RAM_TYPE_DSP); if (ret) @@ -3021,10 +3043,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev) } static int -mt7996_firmware_state(struct mt7996_dev *dev, bool wa) +mt7996_firmware_state(struct mt7996_dev *dev, u8 fw_state) { - u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, - wa ? 
FW_STATE_RDY : FW_STATE_FW_DOWNLOAD); + u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, fw_state); if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, state, 1000)) { @@ -3056,13 +3077,14 @@ mt7996_mcu_restart(struct mt76_dev *dev) static int mt7996_load_firmware(struct mt7996_dev *dev) { + u8 fw_state; int ret; /* make sure fw is download state */ - if (mt7996_firmware_state(dev, false)) { + if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) { /* restart firmware once */ mt7996_mcu_restart(&dev->mt76); - ret = mt7996_firmware_state(dev, false); + ret = mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD); if (ret) { dev_err(dev->mt76.dev, "Firmware is not ready for download\n"); @@ -3078,7 +3100,8 @@ static int mt7996_load_firmware(struct mt7996_dev *dev) if (ret) return ret; - ret = mt7996_firmware_state(dev, true); + fw_state = mt7996_has_wa(dev) ? FW_STATE_RDY : FW_STATE_NORMAL_TRX; + ret = mt7996_firmware_state(dev, fw_state); if (ret) return ret; @@ -3216,13 +3239,15 @@ int mt7996_mcu_init_firmware(struct mt7996_dev *dev) if (ret) return ret; - ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0); - if (ret) - return ret; + if (mt7996_has_wa(dev)) { + ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0); + if (ret) + return ret; - ret = mt7996_mcu_set_mwds(dev, 1); - if (ret) - return ret; + ret = mt7996_mcu_set_mwds(dev, 1); + if (ret) + return ret; + } ret = mt7996_mcu_init_rx_airtime(dev); if (ret) @@ -3248,7 +3273,7 @@ int mt7996_mcu_init(struct mt7996_dev *dev) void mt7996_mcu_exit(struct mt7996_dev *dev) { mt7996_mcu_restart(&dev->mt76); - if (mt7996_firmware_state(dev, false)) { + if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) { dev_err(dev->mt76.dev, "Failed to exit mcu\n"); goto out; } @@ -3535,11 +3560,10 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy, struct cfg80211_chan_def *chandef) { struct mt7996_dev *dev = phy->dev; - int err, region; + int err, region, rdd_idx = mt7996_get_rdd_idx(phy, true); if (!chandef) { /* disable offchain */ - err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, MT_RX_SEL2, - 0, 0); + err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0); if (err) return err; @@ -3565,8 +3589,7 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy, break; } - return mt7996_mcu_rdd_cmd(dev, RDD_START, MT_RX_SEL2, - 0, region); + return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region); } int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag) @@ -4433,8 +4456,7 @@ int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable) &req, sizeof(req), true); } -int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index, - u8 rx_sel, u8 val) +int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val) { struct { u8 _rsv[4]; @@ -4451,8 +4473,7 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index, .tag = cpu_to_le16(UNI_RDD_CTRL_PARM), .len = cpu_to_le16(sizeof(req) - 4), .ctrl = cmd, - .rdd_idx = index, - .rdd_rx_sel = rx_sel, + .rdd_idx = rdd_idx, .val = val, }; @@ -4742,7 +4763,26 @@ int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode) mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO)) return -EINVAL; + if (!mt7996_has_wa(dev)) { + struct { + u8 _rsv[4]; + + __le16 tag; + __le16 len; + u8 cp_mode; + u8 rsv[3]; + } __packed req = { + .tag = cpu_to_le16(UNI_CMD_SDO_CP_MODE), + .len = cpu_to_le16(sizeof(req) - 4), + .cp_mode = mode, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO), + &req, sizeof(req), false); + } + cp_mode = cpu_to_le32(mode); + return 
mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT), &cp_mode, sizeof(cp_mode), true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index 2ab6a53bee86..130ea95626d5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -69,7 +69,7 @@ struct mt7996_mcu_rdd_report { __le16 tag; __le16 len; - u8 band_idx; + u8 rdd_idx; u8 long_detected; u8 constant_prf_detected; u8 staggered_prf_detected; @@ -832,13 +832,13 @@ enum { sizeof(struct sta_rec_eht_mld) + \ sizeof(struct tlv)) -#define MT7996_MAX_BEACON_SIZE 1338 #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct bss_bcn_content_tlv) + \ 4 + MT_TXD_SIZE + \ sizeof(struct bss_bcn_cntdwn_tlv) + \ sizeof(struct bss_bcn_mbss_tlv)) -#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ +#define MT7996_MAX_BSS_OFFLOAD_SIZE 2048 +#define MT7996_MAX_BEACON_SIZE (MT7996_MAX_BSS_OFFLOAD_SIZE - \ MT7996_BEACON_UPDATE_SIZE) enum { @@ -938,6 +938,12 @@ enum { }; enum { + UNI_CMD_SDO_SET = 1, + UNI_CMD_SDO_QUERY, + UNI_CMD_SDO_CP_MODE = 6, +}; + +enum { MT7996_SEC_MODE_PLAIN, MT7996_SEC_MODE_AES, MT7996_SEC_MODE_SCRAMBLE, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 13b188e281bd..30b40f4a91be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -54,6 +54,17 @@ static const u32 mt7996_offs[] = { [MIB_BSCR7] = 0x9e8, [MIB_BSCR17] = 0xa10, [MIB_TRDR1] = 0xa28, + [HIF_REMAP_L1] = 0x24, + [HIF_REMAP_BASE_L1] = 0x130000, + [HIF_REMAP_L2] = 0x1b4, + [HIF_REMAP_BASE_L2] = 0x1000, + [CBTOP1_PHY_END] = 0x77ffffff, + [INFRA_MCU_END] = 0x7c3fffff, + [WTBLON_WDUCR] = 0x370, + [WTBL_UPDATE] = 0x380, + [WTBL_ITCR] = 0x3b0, + [WTBL_ITCR0] = 0x3b8, + [WTBL_ITCR1] = 0x3bc, }; static const u32 mt7992_offs[] = { @@ -80,6 +91,54 @@ static const u32 mt7992_offs[] = { [MIB_BSCR7] = 0xae4, [MIB_BSCR17] = 0xb0c, [MIB_TRDR1] = 0xb24, + [HIF_REMAP_L1] = 0x8, + [HIF_REMAP_BASE_L1] = 0x40000, + [HIF_REMAP_L2] = 0x1b4, + [HIF_REMAP_BASE_L2] = 0x1000, + [CBTOP1_PHY_END] = 0x77ffffff, + [INFRA_MCU_END] = 0x7c3fffff, + [WTBLON_WDUCR] = 0x370, + [WTBL_UPDATE] = 0x380, + [WTBL_ITCR] = 0x3b0, + [WTBL_ITCR0] = 0x3b8, + [WTBL_ITCR1] = 0x3bc, +}; + +static const u32 mt7990_offs[] = { + [MIB_RVSR0] = 0x800, + [MIB_RVSR1] = 0x804, + [MIB_BTSCR5] = 0x868, + [MIB_BTSCR6] = 0x878, + [MIB_RSCR1] = 0x890, + [MIB_RSCR27] = 0xa38, + [MIB_RSCR28] = 0xa3c, + [MIB_RSCR29] = 0xa40, + [MIB_RSCR30] = 0xa44, + [MIB_RSCR31] = 0xa48, + [MIB_RSCR33] = 0xa50, + [MIB_RSCR35] = 0xa58, + [MIB_RSCR36] = 0xa5c, + [MIB_BSCR0] = 0xbb8, + [MIB_BSCR1] = 0xbbc, + [MIB_BSCR2] = 0xbc0, + [MIB_BSCR3] = 0xbc4, + [MIB_BSCR4] = 0xbc8, + [MIB_BSCR5] = 0xbcc, + [MIB_BSCR6] = 0xbd0, + [MIB_BSCR7] = 0xbd4, + [MIB_BSCR17] = 0xbfc, + [MIB_TRDR1] = 0xc14, + [HIF_REMAP_L1] = 0x8, + [HIF_REMAP_BASE_L1] = 0x40000, + [HIF_REMAP_L2] = 0x1b8, + [HIF_REMAP_BASE_L2] = 0x110000, + [CBTOP1_PHY_END] = 0x7fffffff, + [INFRA_MCU_END] = 0x7cffffff, + [WTBLON_WDUCR] = 0x400, + [WTBL_UPDATE] = 0x410, + [WTBL_ITCR] = 0x440, + [WTBL_ITCR0] = 0x448, + [WTBL_ITCR1] = 0x44c, }; static const struct __map mt7996_reg_map[] = { @@ -135,14 +194,83 @@ static const struct __map mt7996_reg_map[] = { { 0x0, 0x0, 0x0 }, /* imply end of search */ }; +static const struct __map mt7990_reg_map[] = { + {0x54000000, 0x02000, 0x1000}, /* WFDMA_0 (PCIE0 MCU DMA0) */ + 
{0x55000000, 0x03000, 0x1000}, /* WFDMA_1 (PCIE0 MCU DMA1) */ + {0x56000000, 0x04000, 0x1000}, /* WFDMA_2 (Reserved) */ + {0x57000000, 0x05000, 0x1000}, /* WFDMA_3 (MCU wrap CR) */ + {0x58000000, 0x06000, 0x1000}, /* WFDMA_4 (PCIE1 MCU DMA0 (MEM_DMA)) */ + {0x59000000, 0x07000, 0x1000}, /* WFDMA_5 (PCIE1 MCU DMA1) */ + {0x820c0000, 0x08000, 0x4000}, /* WF_UMAC_TOP (PLE) */ + {0x820c8000, 0x0c000, 0x2000}, /* WF_UMAC_TOP (PSE) */ + {0x820cc000, 0x0e000, 0x2000}, /* WF_UMAC_TOP (PP) */ + {0x820e0000, 0x20000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_CFG) */ + {0x820e1000, 0x20400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_TRB) */ + {0x820e2000, 0x20800, 0x0400}, /* WF_LMAC_TOP BN0 (WF_AGG) */ + {0x820e3000, 0x20c00, 0x0400}, /* WF_LMAC_TOP BN0 (WF_ARB) */ + {0x820e4000, 0x21000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_TMAC) */ + {0x820e5000, 0x21400, 0x0800}, /* WF_LMAC_TOP BN0 (WF_RMAC) */ + {0x820ce000, 0x21c00, 0x0200}, /* WF_LMAC_TOP (WF_SEC) */ + {0x820e7000, 0x21e00, 0x0200}, /* WF_LMAC_TOP BN0 (WF_DMA) */ + {0x820cf000, 0x22000, 0x1000}, /* WF_LMAC_TOP (WF_PF) */ + {0x820e9000, 0x23400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ + {0x820ea000, 0x24000, 0x0200}, /* WF_LMAC_TOP BN0 (WF_ETBF) */ + {0x820eb000, 0x24200, 0x0400}, /* WF_LMAC_TOP BN0 (WF_LPON) */ + {0x820ec000, 0x24600, 0x0200}, /* WF_LMAC_TOP BN0 (WF_INT) */ + {0x820ed000, 0x24800, 0x0800}, /* WF_LMAC_TOP BN0 (WF_MIB) */ + {0x820ca000, 0x26000, 0x2000}, /* WF_LMAC_TOP BN0 (WF_MUCOP) */ + {0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */ + {0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */ + {0x820f0000, 0xa0000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_CFG) */ + {0x820f1000, 0xa0600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_TRB) */ + {0x820f2000, 0xa0800, 0x0400}, /* WF_LMAC_TOP BN1 (WF_AGG) */ + {0x820f3000, 0xa0c00, 0x0400}, /* WF_LMAC_TOP BN1 (WF_ARB) */ + {0x820f4000, 0xa1000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_TMAC) */ + {0x820f5000, 0xa1400, 0x0800}, /* WF_LMAC_TOP BN1 (WF_RMAC) */ + {0x820f7000, 0xa1e00, 0x0200}, /* WF_LMAC_TOP BN1 (WF_DMA) */ + {0x820f9000, 0xa3400, 0x0200}, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ + {0x820fa000, 0xa4000, 0x0200}, /* WF_LMAC_TOP BN1 (WF_ETBF) */ + {0x820fb000, 0xa4200, 0x0400}, /* WF_LMAC_TOP BN1 (WF_LPON) */ + {0x820fc000, 0xa4600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_INT) */ + {0x820fd000, 0xa4800, 0x0800}, /* WF_LMAC_TOP BN1 (WF_MIB) */ + {0x820cc000, 0xa5000, 0x2000}, /* WF_LMAC_TOP BN1 (WF_MUCOP) */ + {0x820c4000, 0xa8000, 0x4000}, /* WF_LMAC_TOP (WF_UWTBL) */ + {0x81030000, 0xae000, 0x100}, /* WFSYS_AON part 1 */ + {0x81031000, 0xae100, 0x100}, /* WFSYS_AON part 2 */ + {0x81032000, 0xae200, 0x100}, /* WFSYS_AON part 3 */ + {0x81033000, 0xae300, 0x100}, /* WFSYS_AON part 4 */ + {0x81034000, 0xae400, 0x100}, /* WFSYS_AON part 5 */ + {0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */ + {0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */ + {0x81040000, 0x120000, 0x1000}, /* WF_MCU_CFG_ON */ + {0x81050000, 0x121000, 0x1000}, /* WF_MCU_EINT */ + {0x81060000, 0x122000, 0x1000}, /* WF_MCU_GPT */ + {0x81070000, 0x123000, 0x1000}, /* WF_MCU_WDT */ + {0x80010000, 0x124000, 0x1000}, /* WF_AXIDMA */ + {0x7c020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma for from CODA flow use */ + {0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top for from CODA flow use */ + {0x20020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma */ + {0x20060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */ + {0x7c000000, 0xf0000, 0x10000}, /* CONN_INFRA */ + {0x70020000, 0x1f0000, 0x9000}, /* PCIE remapping (AP2CONN) */ + {0x0, 0x0, 0x0}, 
/* imply end of search */ +}; + static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr) { u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr); u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr); + u32 l1_mask, val; - dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, - MT_HIF_REMAP_L1_MASK, - FIELD_PREP(MT_HIF_REMAP_L1_MASK, base)); + if (is_mt7996(&dev->mt76)) { + l1_mask = MT_HIF_REMAP_L1_MASK_7996; + val = FIELD_PREP(MT_HIF_REMAP_L1_MASK_7996, base); + } else { + l1_mask = MT_HIF_REMAP_L1_MASK; + val = FIELD_PREP(MT_HIF_REMAP_L1_MASK, base); + } + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, l1_mask, val); /* use read to push write */ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1); @@ -151,18 +279,41 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr) static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr) { - u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); - u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); + u32 offset, base, l2_mask, val; - dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2, - MT_HIF_REMAP_L2_MASK, - FIELD_PREP(MT_HIF_REMAP_L2_MASK, base)); + if (is_mt7990(&dev->mt76)) { + offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_7990, addr); + base = FIELD_GET(MT_HIF_REMAP_L2_BASE_7990, addr); + l2_mask = MT_HIF_REMAP_L2_MASK_7990; + val = FIELD_PREP(MT_HIF_REMAP_L2_MASK_7990, base); + } else { + offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr); + base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr); + l2_mask = MT_HIF_REMAP_L2_MASK; + val = FIELD_PREP(MT_HIF_REMAP_L2_MASK, base); + } + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2, l2_mask, val); /* use read to push write */ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2); return MT_HIF_REMAP_BASE_L2 + offset; } +static u32 mt7996_reg_map_cbtop(struct mt7996_dev *dev, u32 addr) +{ + u32 offset = FIELD_GET(MT_HIF_REMAP_CBTOP_OFFSET, addr); + u32 base = FIELD_GET(MT_HIF_REMAP_CBTOP_BASE, addr); + + dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_CBTOP, + MT_HIF_REMAP_CBTOP_MASK, + FIELD_PREP(MT_HIF_REMAP_CBTOP_MASK, base)); + /* use read to push write */ + dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_CBTOP); + + return MT_HIF_REMAP_BASE_CBTOP + offset; +} + static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr) { int i; @@ -193,17 +344,20 @@ static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr) (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END)) return mt7996_reg_map_l1(dev, addr); - if (dev_is_pci(dev->mt76.dev) && - ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) || - addr >= MT_CBTOP2_PHY_START)) - return mt7996_reg_map_l1(dev, addr); - /* CONN_INFRA: covert to phyiscal addr and use layer 1 remap */ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) { addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE; return mt7996_reg_map_l1(dev, addr); } + if (dev_is_pci(dev->mt76.dev) && + ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) || + addr >= MT_CBTOP2_PHY_START)) { + if (is_mt7990(&dev->mt76)) + return mt7996_reg_map_cbtop(dev, addr); + return mt7996_reg_map_l1(dev, addr); + } + return mt7996_reg_map_l2(dev, addr); } @@ -323,6 +477,9 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, wed->wlan.base = devm_ioremap(dev->mt76.dev, pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); + if (!wed->wlan.base) + return -ENOMEM; + wed->wlan.phy_base = pci_resource_start(pci_dev, 0); if (hif2) { @@ -350,7 +507,7 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr, MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) + MT7996_RXQ_BAND0 * 
MT_RING_SIZE; - wed->wlan.id = 0x7991; + wed->wlan.id = MT7996_DEVICE_ID_2; wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1; } else { wed->wlan.hw_rro = dev->has_rro; /* default on */ @@ -443,18 +600,24 @@ static int mt7996_mmio_init(struct mt76_dev *mdev, spin_lock_init(&dev->reg_lock); switch (device_id) { - case 0x7990: + case MT7996_DEVICE_ID: dev->reg.base = mt7996_reg_base; dev->reg.offs_rev = mt7996_offs; dev->reg.map = mt7996_reg_map; dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map); break; - case 0x7992: + case MT7992_DEVICE_ID: dev->reg.base = mt7996_reg_base; dev->reg.offs_rev = mt7992_offs; dev->reg.map = mt7996_reg_map; dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map); break; + case MT7990_DEVICE_ID: + dev->reg.base = mt7996_reg_base; + dev->reg.offs_rev = mt7990_offs; + dev->reg.map = mt7990_reg_map; + dev->reg.map_size = ARRAY_SIZE(mt7990_reg_map); + break; default: return -EINVAL; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 43e646ed6094..1ad6bc046f7c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -14,7 +14,7 @@ #define MT7996_MAX_RADIOS 3 #define MT7996_MAX_INTERFACES 19 /* per-band */ #define MT7996_MAX_WMM_SETS 4 -#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64) +#define MT7996_WTBL_BMC_SIZE (is_mt7996(&dev->mt76) ? 64 : 32) #define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1) #define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \ mt7996_max_interface_num(dev)) @@ -29,6 +29,16 @@ #define MT7996_RX_RING_SIZE 1536 #define MT7996_RX_MCU_RING_SIZE 512 #define MT7996_RX_MCU_RING_SIZE_WA 1024 +/* scatter-gather of mcu event is not supported in connac3 */ +#define MT7996_RX_MCU_BUF_SIZE (2048 + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +#define MT7996_DEVICE_ID 0x7990 +#define MT7996_DEVICE_ID_2 0x7991 +#define MT7992_DEVICE_ID 0x7992 +#define MT7992_DEVICE_ID_2 0x799a +#define MT7990_DEVICE_ID 0x7993 +#define MT7990_DEVICE_ID_2 0x799b #define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin" #define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin" @@ -50,6 +60,11 @@ #define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin" #define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin" +#define MT7990_FIRMWARE_WA "" +#define MT7990_FIRMWARE_WM "mediatek/mt7996/mt7990_wm.bin" +#define MT7990_FIRMWARE_DSP "" +#define MT7990_ROM_PATCH "mediatek/mt7996/mt7990_rom_patch.bin" + #define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin" #define MT7996_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7996_eeprom_2i5i6i.bin" #define MT7996_EEPROM_DEFAULT_233 "mediatek/mt7996/mt7996_eeprom_233.bin" @@ -61,6 +76,9 @@ #define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin" #define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin" +#define MT7990_EEPROM_DEFAULT "mediatek/mt7996/mt7990_eeprom.bin" +#define MT7990_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7990_eeprom_2i5i.bin" + #define MT7996_EEPROM_SIZE 7680 #define MT7996_EEPROM_BLOCK_SIZE 16 #define MT7996_TOKEN_SIZE 16384 @@ -131,6 +149,10 @@ enum mt7992_var_type { MT7992_VAR_TYPE_23, }; +enum mt7990_var_type { + MT7990_VAR_TYPE_23, +}; + enum mt7996_fem_type { MT7996_FEM_EXT, MT7996_FEM_INT, @@ -165,6 +187,8 @@ enum mt7996_rxq_id { MT7996_RXQ_TXFREE1 = 9, MT7996_RXQ_TXFREE2 = 7, MT7996_RXQ_RRO_IND = 0, + MT7990_RXQ_TXFREE0 = 6, + MT7990_RXQ_TXFREE1 = 7, }; struct mt7996_twt_flow { @@ -281,8 +305,6 @@ struct 
mt7996_phy { s16 coverage_class; u8 slottime; - u8 rdd_state; - u16 beacon_rate; u32 rx_ampdu_ts; @@ -293,6 +315,7 @@ struct mt7996_phy { struct mt76_channel_state state_ts; u16 orig_chainmask; + u16 orig_antenna_mask; bool has_aux_rx; bool counter_reset; @@ -405,10 +428,10 @@ enum { __MT_WFDMA_MAX, }; -enum { - MT_RX_SEL0, - MT_RX_SEL1, - MT_RX_SEL2, /* monitor chain */ +enum rdd_idx { + MT_RDD_IDX_BAND2, /* RDD idx for band idx 2 */ + MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */ + MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */ }; enum mt7996_rdd_cmd { @@ -427,6 +450,21 @@ enum mt7996_rdd_cmd { RDD_IRQ_OFF, }; +static inline int +mt7996_get_rdd_idx(struct mt7996_phy *phy, bool is_background) +{ + if (!phy->mt76->cap.has_5ghz) + return -1; + + if (is_background) + return MT_RDD_IDX_BACKGROUND; + + if (phy->mt76->band_idx == MT_BAND2) + return MT_RDD_IDX_BAND2; + + return MT_RDD_IDX_BAND1; +} + static inline struct mt7996_dev * mt7996_hw_dev(struct ieee80211_hw *hw) { @@ -461,31 +499,12 @@ mt7996_phy3(struct mt7996_dev *dev) static inline bool mt7996_band_valid(struct mt7996_dev *dev, u8 band) { - if (is_mt7992(&dev->mt76)) + if (!is_mt7996(&dev->mt76)) return band <= MT_BAND1; return band <= MT_BAND2; } -static inline bool -mt7996_has_background_radar(struct mt7996_dev *dev) -{ - switch (mt76_chip(&dev->mt76)) { - case 0x7990: - if (dev->var.type == MT7996_VAR_TYPE_233) - return false; - break; - case 0x7992: - if (dev->var.type == MT7992_VAR_TYPE_23) - return false; - break; - default: - return false; - } - - return true; -} - static inline struct mt7996_phy * mt7996_band_phy(struct mt7996_dev *dev, enum nl80211_band band) { @@ -549,6 +568,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy); int mt7996_eeprom_get_target_power(struct mt7996_dev *dev, struct ieee80211_channel *chan); s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band); +bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev); int mt7996_dma_init(struct mt7996_dev *dev); void mt7996_dma_reset(struct mt7996_dev *dev, bool force); void mt7996_dma_prefetch(struct mt7996_dev *dev); @@ -637,8 +657,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy); int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state); int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable); int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy); -int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index, - u8 rx_sel, u8 val); +int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val); int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy, struct cfg80211_chan_def *chandef); int mt7996_mcu_set_fixed_rate_table(struct mt7996_phy *phy, u8 table_idx, @@ -704,6 +723,11 @@ static inline u16 mt7996_rx_chainmask(struct mt7996_phy *phy) return tx_chainmask | (BIT(fls(tx_chainmask)) * phy->has_aux_rx); } +static inline bool mt7996_has_wa(struct mt7996_dev *dev) +{ + return !is_mt7990(&dev->mt76); +} + void mt7996_mac_init(struct mt7996_dev *dev); u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw); bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c index 04056181368a..19e99bc1c6c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c @@ -16,14 +16,16 @@ static DEFINE_SPINLOCK(hif_lock); static u32 hif_idx; static const 
struct pci_device_id mt7996_pci_device_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) }, - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7992) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID) }, { }, }; static const struct pci_device_id mt7996_hif_device_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) }, - { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x799a) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2) }, { }, }; @@ -63,8 +65,9 @@ static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev) { hif_idx++; - if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL) && - !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x799a, NULL)) + if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2, NULL) && + !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2, NULL) && + !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2, NULL)) return NULL; writel(hif_idx | MT_PCIE_RECOG_ID_SEM, @@ -121,7 +124,9 @@ static int mt7996_pci_probe(struct pci_dev *pdev, mt76_pci_disable_aspm(pdev); - if (id->device == 0x7991 || id->device == 0x799a) + if (id->device == MT7996_DEVICE_ID_2 || + id->device == MT7992_DEVICE_ID_2 || + id->device == MT7990_DEVICE_ID_2) return mt7996_pci_hif2_probe(pdev); dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0], @@ -256,3 +261,5 @@ MODULE_FIRMWARE(MT7992_FIRMWARE_WA); MODULE_FIRMWARE(MT7992_FIRMWARE_WM); MODULE_FIRMWARE(MT7992_FIRMWARE_DSP); MODULE_FIRMWARE(MT7992_ROM_PATCH); +MODULE_FIRMWARE(MT7990_FIRMWARE_WM); +MODULE_FIRMWARE(MT7990_ROM_PATCH); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h index 1876a968c92d..e942c0058731 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h @@ -64,6 +64,17 @@ enum offs_rev { MIB_BSCR7, MIB_BSCR17, MIB_TRDR1, + HIF_REMAP_L1, + HIF_REMAP_BASE_L1, + HIF_REMAP_L2, + HIF_REMAP_BASE_L2, + CBTOP1_PHY_END, + INFRA_MCU_END, + WTBLON_WDUCR, + WTBL_UPDATE, + WTBL_ITCR, + WTBL_ITCR0, + WTBL_ITCR1, __MT_OFFS_MAX, }; @@ -291,19 +302,19 @@ enum offs_rev { /* WTBLON TOP */ #define MT_WTBLON_TOP_BASE 0x820d4000 #define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs)) -#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370) +#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_WDUCR)) #define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0) -#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380) +#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE)) #define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0) #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14) #define MT_WTBL_UPDATE_BUSY BIT(31) -#define MT_WTBL_ITCR MT_WTBLON_TOP(0x3b0) +#define MT_WTBL_ITCR MT_WTBLON_TOP(__OFFS(WTBL_ITCR)) #define MT_WTBL_ITCR_WR BIT(16) #define MT_WTBL_ITCR_EXEC BIT(31) -#define MT_WTBL_ITDR0 MT_WTBLON_TOP(0x3b8) -#define MT_WTBL_ITDR1 MT_WTBLON_TOP(0x3bc) +#define MT_WTBL_ITDR0 MT_WTBLON_TOP(__OFFS(WTBL_ITCR0)) +#define MT_WTBL_ITDR1 MT_WTBLON_TOP(__OFFS(WTBL_ITCR1)) #define MT_WTBL_SPE_IDX_SEL BIT(6) /* WTBL */ @@ -483,7 +494,7 @@ enum offs_rev { #define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \ MT_MCUQ_ID(q) * 0x4) -#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \ +#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \ MT_RXQ_ID(q) * 0x4) #define 
MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \ MT_TXQ_ID(q) * 0x4) @@ -504,6 +515,8 @@ enum offs_rev { #define MT_INT_RX_DONE_WA_TRI BIT(3) #define MT_INT_RX_TXFREE_MAIN BIT(17) #define MT_INT_RX_TXFREE_TRI BIT(15) +#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14) +#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15) #define MT_INT_RX_DONE_BAND2_EXT BIT(23) #define MT_INT_RX_TXFREE_EXT BIT(26) #define MT_INT_MCU_CMD BIT(29) @@ -576,27 +589,39 @@ enum offs_rev { #define MT_MCU_CMD_WDT_MASK GENMASK(31, 30) /* l1/l2 remap */ -#define MT_HIF_REMAP_L1 0x155024 -#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16) +#define CONN_BUS_CR_VON_BASE 0x155000 +#define MT_HIF_REMAP_L1 (CONN_BUS_CR_VON_BASE + __OFFS(HIF_REMAP_L1)) +#define MT_HIF_REMAP_L1_MASK_7996 GENMASK(31, 16) +#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0) #define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0) #define MT_HIF_REMAP_L1_BASE GENMASK(31, 16) -#define MT_HIF_REMAP_BASE_L1 0x130000 +#define MT_HIF_REMAP_BASE_L1 __OFFS(HIF_REMAP_BASE_L1) -#define MT_HIF_REMAP_L2 0x1b4 +#define MT_HIF_REMAP_L2 __OFFS(HIF_REMAP_L2) #define MT_HIF_REMAP_L2_MASK GENMASK(19, 0) #define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0) #define MT_HIF_REMAP_L2_BASE GENMASK(31, 12) -#define MT_HIF_REMAP_BASE_L2 0x1000 +#define MT_HIF_REMAP_L2_MASK_7990 GENMASK(15, 0) +#define MT_HIF_REMAP_L2_OFFSET_7990 GENMASK(15, 0) +#define MT_HIF_REMAP_L2_BASE_7990 GENMASK(31, 16) +#define MT_HIF_REMAP_BASE_L2 __OFFS(HIF_REMAP_BASE_L2) + +/* for mt7990 only */ +#define MT_HIF_REMAP_CBTOP 0x1f6554 +#define MT_HIF_REMAP_CBTOP_MASK GENMASK(15, 0) +#define MT_HIF_REMAP_CBTOP_OFFSET GENMASK(15, 0) +#define MT_HIF_REMAP_CBTOP_BASE GENMASK(31, 16) +#define MT_HIF_REMAP_BASE_CBTOP 0x1c0000 #define MT_INFRA_BASE 0x18000000 #define MT_WFSYS0_PHY_START 0x18400000 #define MT_WFSYS1_PHY_START 0x18800000 #define MT_WFSYS1_PHY_END 0x18bfffff #define MT_CBTOP1_PHY_START 0x70000000 -#define MT_CBTOP1_PHY_END 0x77ffffff +#define MT_CBTOP1_PHY_END __OFFS(CBTOP1_PHY_END) #define MT_CBTOP2_PHY_START 0xf0000000 #define MT_INFRA_MCU_START 0x7c000000 -#define MT_INFRA_MCU_END 0x7c3fffff +#define MT_INFRA_MCU_END __OFFS(INFRA_MCU_END) /* FW MODE SYNC */ #define MT_FW_ASSERT_CNT 0x02208274 diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 10d2e2124ff8..c2a1234b59db 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -503,8 +503,10 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, (void *)buffer, buffer_len, complete_fn, context); r = usb_submit_urb(urb, GFP_ATOMIC); - if (r) + if (r) { + usb_free_urb(urb); dev_err(&udev->dev, "Async write submit failed (%d)\n", r); + } return r; } diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 7537f04b1930..819cf519e66e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -58,17 +58,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, } EXPORT_SYMBOL(rtl_rfreg_delay); -void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) -{ - if (addr >= 0xf9 && addr <= 0xfe) { - rtl_addr_delay(addr); - } else { - rtl_set_bbreg(hw, addr, MASKDWORD, data); - udelay(1); - } -} -EXPORT_SYMBOL(rtl_bb_delay); - static void rtl_fw_do_work(const struct firmware *firmware, void *context, bool is_wow) { diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h index 
42c2d9e13bb8..45225d89ac5e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.h +++ b/drivers/net/wireless/realtek/rtlwifi/core.h @@ -58,7 +58,6 @@ void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context); void rtl_addr_delay(u32 addr); void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, u32 mask, u32 data); -void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); bool rtl_btc_status_false(void); void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 0eafc4d125f9..898f597f70a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -155,6 +155,16 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) ((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC)) ppsc->support_aspm = false; + + /* RTL8723BE found on some ASUSTek laptops, such as F441U and + * X555UQ with subsystem ID 11ad:1723 are known to output large + * amounts of PCIe AER errors during and after boot up, causing + * heavy lags, poor network throughput, and occasional lock-ups. + */ + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8723BE && + (rtlpci->pdev->subsystem_vendor == 0x11ad && + rtlpci->pdev->subsystem_device == 0x1723)) + ppsc->support_aspm = false; } static bool _rtl_pci_platform_switch_device_pci_aspm( diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index e07402e73ba3..68f890050afb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2055,11 +2055,6 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n"); } -void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) -{ - return; -} - static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid, u32 para1, u32 para2, u32 msdelay) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index bbe9ef77225e..a9bfe54f2802 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h @@ -83,7 +83,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw); bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw); void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw); -void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw); void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c index 289ec71ce3e5..8c2167cc1f13 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c @@ -2445,11 +2445,6 @@ void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n"); } -void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) -{ - /* Nothing to do. 
*/ -} - u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h index 090a6203db7e..a107a5a76beb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h @@ -24,7 +24,6 @@ void rtl92du_phy_set_poweron(struct ieee80211_hw *hw); bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw); void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw); -void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw); void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel); void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 73ef602bfb01..1e7f0cd1c86e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -2926,10 +2926,6 @@ void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw) rtlphy->lck_inprogress = false; } -void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) -{ -} - void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) { _rtl92ee_phy_set_rfpath_switch(hw, bmain, false); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h index 1a5dbc628379..ec4c26b81c48 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h @@ -119,7 +119,6 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw, void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw); u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw); void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); -void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 9eddbada8af1..13a05066e8a6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -4586,10 +4586,6 @@ void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw) { } -void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) -{ -} - void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) { _rtl8821ae_phy_set_rfpath_switch(hw, bmain); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h index 35b7d0f70125..90bf5462a3f8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h @@ -214,7 +214,6 @@ void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); -void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl8812ae_phy_config_rf_with_headerfile(struct 
ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index f5718e570011..d35ed56d6db9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1077,15 +1077,3 @@ void rtl_usb_disconnect(struct usb_interface *intf) ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtl_usb_disconnect); - -int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message) -{ - return 0; -} -EXPORT_SYMBOL(rtl_usb_suspend); - -int rtl_usb_resume(struct usb_interface *pusb_intf) -{ - return 0; -} -EXPORT_SYMBOL(rtl_usb_resume); diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index b66d6f9ae564..b873bbc9c4c2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -138,7 +138,5 @@ int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, const struct rtl_hal_cfg *rtl92cu_hal_cfg); void rtl_usb_disconnect(struct usb_interface *intf); -int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); -int rtl_usb_resume(struct usb_interface *pusb_intf); #endif diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index c929db1e53ca..64904278ddad 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - u8 para[2] = {0}; + u8 para[6] = {}; u8 times; u16 tbtt_interval = coex_stat->wl_beacon_interval; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 6b563ac489a7..4fc78b882080 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1466,7 +1466,7 @@ void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev, int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, u8 *buf, u32 size) { - u8 bckp[2]; + u8 bckp[3]; u8 val; u16 rsvd_pg_head; u32 bcn_valid_addr; @@ -1478,6 +1478,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, if (!size) return -EINVAL; + bckp[2] = rtw_read8(rtwdev, REG_BCN_CTRL); + if (rtw_chip_wcpu_11n(rtwdev)) { rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID); } else { @@ -1491,6 +1493,9 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, val |= BIT_ENSWBCN >> 8; rtw_write8(rtwdev, REG_CR + 1, val); + rtw_write8(rtwdev, REG_BCN_CTRL, + (bckp[2] & ~BIT_EN_BCN_FUNCTION) | BIT_DIS_TSF_UDT); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); bckp[1] = val; @@ -1521,6 +1526,7 @@ restore: rsvd_pg_head = rtwdev->fifo.rsvd_boundary; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, rsvd_pg_head | BIT_BCN_VALID_V1); + rtw_write8(rtwdev, REG_BCN_CTRL, bckp[2]); if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); rtw_write8(rtwdev, REG_CR + 1, bckp[0]); diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h index 96aeda26014e..d4bee9c3ecfe 100644 --- a/drivers/net/wireless/realtek/rtw88/hci.h +++ b/drivers/net/wireless/realtek/rtw88/hci.h @@ -19,6 +19,8 @@ struct rtw_hci_ops { void (*link_ps)(struct rtw_dev *rtwdev, bool enter); void (*interface_cfg)(struct rtw_dev *rtwdev); void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool 
enable); + void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size); int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size); int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size); @@ -79,6 +81,12 @@ static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable); } +static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) +{ + rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size); +} + static inline int rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 0491f501c138..f66d1b302dc5 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -856,8 +856,8 @@ fwdl_ready: } } -static void -write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size) +void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) { u32 val32; u32 block_nr; @@ -887,6 +887,7 @@ write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size) rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data)); } } +EXPORT_SYMBOL(rtw_write_firmware_page); static int download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size) @@ -904,11 +905,13 @@ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size) rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT); for (page = 0; page < total_page; page++) { - write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY); + rtw_hci_write_firmware_page(rtwdev, page, data, + DLFW_PAGE_SIZE_LEGACY); data += DLFW_PAGE_SIZE_LEGACY; } if (last_page_size) - write_firmware_page(rtwdev, page, data, last_page_size); + rtw_hci_write_firmware_page(rtwdev, page, data, + last_page_size); if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) { rtw_err(rtwdev, "failed to check download firmware report\n"); diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h index 6905e2747372..e92b1483728d 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.h +++ b/drivers/net/wireless/realtek/rtw88/mac.h @@ -34,6 +34,8 @@ int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, const struct rtw_pwr_seq_cmd * const *cmd_seq); int rtw_mac_power_on(struct rtw_dev *rtwdev); void rtw_mac_power_off(struct rtw_dev *rtwdev); +void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size); int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); int rtw_mac_init(struct rtw_dev *rtwdev); void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 026fbf4ad9cc..77f9fbe1870c 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -396,6 +396,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, if (rtw_bf_support) rtw_bf_assoc(rtwdev, vif, conf); + rtw_set_ampdu_factor(rtwdev, vif, conf); + rtw_fw_beacon_filter_config(rtwdev, true, vif); } else { rtw_leave_lps(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 959f56a3cc1a..c4de5d114eda 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ 
b/drivers/net/wireless/realtek/rtw88/main.c @@ -2242,7 +2242,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); - ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + if (rtwdev->chip->amsdu_in_ampdu) + ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); @@ -2447,6 +2448,38 @@ void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable) } } +void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + const struct rtw_chip_ops *ops = rtwdev->chip->ops; + struct ieee80211_sta *sta; + u8 factor = 0xff; + + if (!ops->set_ampdu_factor) + return; + + rcu_read_lock(); + + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!sta) { + rcu_read_unlock(); + rtw_warn(rtwdev, "%s: failed to find station %pM\n", + __func__, bss_conf->bssid); + return; + } + + if (sta->deflink.vht_cap.vht_supported) + factor = u32_get_bits(sta->deflink.vht_cap.cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + else if (sta->deflink.ht_cap.ht_supported) + factor = sta->deflink.ht_cap.ampdu_factor; + + rcu_read_unlock(); + + if (factor != 0xff) + ops->set_ampdu_factor(rtwdev, factor); +} + MODULE_AUTHOR("Realtek Corporation"); MODULE_DESCRIPTION("Realtek 802.11ac wireless core module"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 02343e059fd9..b0f1fabe9554 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -878,6 +878,7 @@ struct rtw_chip_ops { u32 antenna_rx); void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable); void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable); + void (*set_ampdu_factor)(struct rtw_dev *rtwdev, u8 factor); void (*false_alarm_statistics)(struct rtw_dev *rtwdev); void (*phy_calibration)(struct rtw_dev *rtwdev); void (*dpk_track)(struct rtw_dev *rtwdev); @@ -1229,6 +1230,7 @@ struct rtw_chip_info { u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; const struct rtw_fwcd_segs *fwcd_segs; + bool amsdu_in_ampdu; u8 usb_tx_agg_desc_num; bool hw_feature_report; u8 c2h_ra_report_size; @@ -2272,4 +2274,6 @@ void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel, void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); bool rtw_core_check_sta_active(struct rtw_dev *rtwdev); void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable); +void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); #endif diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index bb4c4ccb31d4..7f2b6dc21f56 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -12,6 +12,7 @@ #include "fw.h" #include "ps.h" #include "debug.h" +#include "mac.h" static bool rtw_disable_msi; static bool rtw_pci_disable_aspm; @@ -1602,6 +1603,7 @@ static const struct rtw_hci_ops rtw_pci_ops = { .link_ps = rtw_pci_link_ps, .interface_cfg = rtw_pci_interface_cfg, .dynamic_rx_agg = NULL, + .write_firmware_page = rtw_write_firmware_page, .read8 = rtw_pci_read8, .read16 = rtw_pci_read16, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c index 
1d232adbdd7e..9e6700c43a63 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c @@ -519,15 +519,6 @@ static const struct rtw_rqpn rqpn_table_8703b[] = { RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, }; -/* Default power index table for RTL8703B, used if EFUSE does not - * contain valid data. Replaces EFUSE data from offset 0x10 (start of - * txpwr_idx_table). - */ -static const u8 rtw8703b_txpwr_idx_table[] = { - 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, - 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02 -}; - static void try_mac_from_devicetree(struct rtw_dev *rtwdev) { struct device_node *node = rtwdev->dev->of_node; @@ -544,15 +535,9 @@ static void try_mac_from_devicetree(struct rtw_dev *rtwdev) } } -#define DBG_EFUSE_FIX(rtwdev, name) \ - rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \ - # name "=0x%x\n", rtwdev->efuse.name) - static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) { struct rtw_efuse *efuse = &rtwdev->efuse; - u8 *pwr = (u8 *)efuse->txpwr_idx_table; - bool valid = false; int ret; ret = rtw8723x_read_efuse(rtwdev, log_map); @@ -562,51 +547,6 @@ static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) if (!is_valid_ether_addr(efuse->addr)) try_mac_from_devicetree(rtwdev); - /* If TX power index table in EFUSE is invalid, fall back to - * built-in table. - */ - for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++) - if (pwr[i] != 0xff) { - valid = true; - break; - } - if (!valid) { - for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++) - pwr[i] = rtw8703b_txpwr_idx_table[i]; - rtw_dbg(rtwdev, RTW_DBG_EFUSE, - "Replaced invalid EFUSE TX power index table."); - rtw8723x_debug_txpwr_limit(rtwdev, - efuse->txpwr_idx_table, 2); - } - - /* Override invalid antenna settings. */ - if (efuse->bt_setting == 0xff) { - /* shared antenna */ - efuse->bt_setting |= BIT(0); - /* RF path A */ - efuse->bt_setting &= ~BIT(6); - DBG_EFUSE_FIX(rtwdev, bt_setting); - } - - /* Override invalid board options: The coex code incorrectly - * assumes that if bits 6 & 7 are set the board doesn't - * support coex. Regd is also derived from rf_board_option and - * should be 0 if there's no valid data. - */ - if (efuse->rf_board_option == 0xff) { - efuse->regd = 0; - efuse->rf_board_option &= GENMASK(5, 0); - DBG_EFUSE_FIX(rtwdev, rf_board_option); - } - - /* Override invalid crystal cap setting, default comes from - * vendor driver. Chip specific. 
- */ - if (efuse->crystal_cap == 0xff) { - efuse->crystal_cap = 0x20; - DBG_EFUSE_FIX(rtwdev, crystal_cap); - } - return 0; } @@ -1904,6 +1844,7 @@ static const struct rtw_chip_ops rtw8703b_ops = { .set_antenna = NULL, .cfg_ldo25 = rtw8723x_cfg_ldo25, .efuse_grant = rtw8723x_efuse_grant, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw8723x_false_alarm_statistics, .phy_calibration = rtw8703b_phy_calibration, .dpk_track = NULL, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c index 8d38d36be8c0..1f98d35a8dd1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c @@ -19,7 +19,7 @@ static const struct sdio_device_id rtw_8723cs_id_table[] = { MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table); static struct sdio_driver rtw_8723cs_driver = { - .name = "rtw8723cs", + .name = KBUILD_MODNAME, .id_table = rtw_8723cs_id_table, .probe = rtw_sdio_probe, .remove = rtw_sdio_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index 87715bd54860..31876e708f9e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -1404,6 +1404,7 @@ static const struct rtw_chip_ops rtw8723d_ops = { .set_antenna = NULL, .cfg_ldo25 = rtw8723x_cfg_ldo25, .efuse_grant = rtw8723x_efuse_grant, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw8723x_false_alarm_statistics, .phy_calibration = rtw8723d_phy_calibration, .cck_pd_set = rtw8723d_phy_cck_pd_set, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c index abbaafa32851..87c8bc9d18a9 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c @@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8723de_id_table[] = { MODULE_DEVICE_TABLE(pci, rtw_8723de_id_table); static struct pci_driver rtw_8723de_driver = { - .name = "rtw_8723de", + .name = KBUILD_MODNAME, .id_table = rtw_8723de_id_table, .probe = rtw_pci_probe, .remove = rtw_pci_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c index e5b6960ba0a0..206b77e5b98e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c @@ -25,7 +25,7 @@ static const struct sdio_device_id rtw_8723ds_id_table[] = { MODULE_DEVICE_TABLE(sdio, rtw_8723ds_id_table); static struct sdio_driver rtw_8723ds_driver = { - .name = "rtw_8723ds", + .name = KBUILD_MODNAME, .probe = rtw_sdio_probe, .remove = rtw_sdio_remove, .id_table = rtw_8723ds_id_table, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723du.c b/drivers/net/wireless/realtek/rtw88/rtw8723du.c index 322a805da76b..b661a26e0e22 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723du.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723du.c @@ -24,7 +24,7 @@ static int rtw8723du_probe(struct usb_interface *intf, } static struct usb_driver rtw_8723du_driver = { - .name = "rtw_8723du", + .name = KBUILD_MODNAME, .id_table = rtw_8723du_id_table, .probe = rtw8723du_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c index 69f73cb5b4cd..4c77963fdd37 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723x.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c @@ -69,6 +69,9 @@ static void __rtw8723x_lck(struct rtw_dev *rtwdev) 
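/*
 * Editor's sketch (not part of the patch): the rtw8723x.c hunks below move
 * the RTL8703B EFUSE sanity fixes into the shared 8723x code.  The key idea
 * is that an unprogrammed EFUSE region reads back as all 0xff, so a field or
 * table is only trusted if at least one byte differs from 0xff; otherwise a
 * built-in default is substituted.  The helper below is a stand-alone
 * illustration of that check; its name is made up for this sketch.
 */
#include <linux/types.h>

static bool efuse_region_is_blank(const u8 *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                if (buf[i] != 0xff)
                        return false;   /* at least one programmed byte */

        return true;                    /* all 0xff: fall back to defaults */
}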
#define DBG_EFUSE_2BYTE(rtwdev, map, name) \ rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \ (map)->name[0], (map)->name[1]) +#define DBG_EFUSE_FIX(rtwdev, name) \ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \ + # name "=0x%x\n", rtwdev->efuse.name) static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev, struct rtw8723x_efuse *map) @@ -238,10 +241,21 @@ static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse, ether_addr_copy(efuse->addr, map->s.mac_addr); } +/* Default power index table for RTL8703B/RTL8723D, used if EFUSE does + * not contain valid data. Replaces EFUSE data from offset 0x10 (start + * of txpwr_idx_table). + */ +static const u8 rtw8723x_txpwr_idx_table[] = { + 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, + 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02 +}; + static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) { struct rtw_efuse *efuse = &rtwdev->efuse; + u8 *pwr = (u8 *)efuse->txpwr_idx_table; struct rtw8723x_efuse *map; + bool valid = false; int i; map = (struct rtw8723x_efuse *)log_map; @@ -279,6 +293,51 @@ static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) return -EOPNOTSUPP; } + /* If TX power index table in EFUSE is invalid, fall back to + * built-in table. + */ + for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++) + if (pwr[i] != 0xff) { + valid = true; + break; + } + if (!valid) { + for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++) + pwr[i] = rtw8723x_txpwr_idx_table[i]; + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "Replaced invalid EFUSE TX power index table."); + rtw8723x_debug_txpwr_limit(rtwdev, + efuse->txpwr_idx_table, 2); + } + + /* Override invalid antenna settings. */ + if (efuse->bt_setting == 0xff) { + /* shared antenna */ + efuse->bt_setting |= BIT(0); + /* RF path A */ + efuse->bt_setting &= ~BIT(6); + DBG_EFUSE_FIX(rtwdev, bt_setting); + } + + /* Override invalid board options: The coex code incorrectly + * assumes that if bits 6 & 7 are set the board doesn't + * support coex. Regd is also derived from rf_board_option and + * should be 0 if there's no valid data. + */ + if (efuse->rf_board_option == 0xff) { + efuse->regd = 0; + efuse->rf_board_option &= GENMASK(5, 0); + DBG_EFUSE_FIX(rtwdev, rf_board_option); + } + + /* Override invalid crystal cap setting, default comes from + * vendor driver. Chip specific. 
+ */ + if (efuse->crystal_cap == 0xff) { + efuse->crystal_cap = 0x20; + DBG_EFUSE_FIX(rtwdev, crystal_cap); + } + return 0; } diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c index f9ba2aa2928a..c2ef41767ff9 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c @@ -925,6 +925,7 @@ static const struct rtw_chip_ops rtw8812a_ops = { .set_tx_power_index = rtw88xxa_set_tx_power_index, .cfg_ldo25 = rtw8812a_cfg_ldo25, .efuse_grant = rtw88xxa_efuse_grant, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw88xxa_false_alarm_statistics, .phy_calibration = rtw8812a_phy_calibration, .cck_pd_set = rtw88xxa_phy_cck_pd_set, @@ -1075,6 +1076,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = { .rfe_defs = rtw8812a_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8812a_rfe_defs), .rx_ldpc = false, + .amsdu_in_ampdu = true, .hw_feature_report = false, .c2h_ra_report_size = 4, .old_datarate_fb_limit = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c index e18995f4cc78..dfea89670372 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c @@ -82,7 +82,7 @@ static const struct usb_device_id rtw_8812au_id_table[] = { MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table); static struct usb_driver rtw_8812au_driver = { - .name = "rtw_8812au", + .name = KBUILD_MODNAME, .id_table = rtw_8812au_id_table, .probe = rtw_usb_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c index cfd35d40d46e..44dd3090484b 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8814a.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c @@ -1332,6 +1332,16 @@ static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) { } +/* Without this RTL8814A sends too many frames and (some?) 11n AP + * can't handle it, resulting in low TX speed. Other chips seem fine. 
+ */ +static void rtw8814a_set_ampdu_factor(struct rtw_dev *rtwdev, u8 factor) +{ + factor = min_t(u8, factor, IEEE80211_VHT_MAX_AMPDU_256K); + + rtw_write32(rtwdev, REG_AMPDU_MAX_LENGTH, (8192 << factor) - 1); +} + static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; @@ -2051,6 +2061,7 @@ static const struct rtw_chip_ops rtw8814a_ops = { .set_antenna = NULL, .cfg_ldo25 = rtw8814a_cfg_ldo25, .efuse_grant = rtw8814a_efuse_grant, + .set_ampdu_factor = rtw8814a_set_ampdu_factor, .false_alarm_statistics = rtw8814a_false_alarm_statistics, .phy_calibration = rtw8814a_phy_calibration, .cck_pd_set = rtw8814a_phy_cck_pd_set, @@ -2189,6 +2200,7 @@ const struct rtw_chip_info rtw8814a_hw_spec = { .rx_ldpc = true, .max_power_index = 0x3f, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2, + .amsdu_in_ampdu = false, /* RX speed is better without AMSDU */ .usb_tx_agg_desc_num = 3, .hw_feature_report = false, .c2h_ra_report_size = 6, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c index 54d2e20a7764..c7436f1c8d40 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c @@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8814ae_id_table[] = { MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table); static struct pci_driver rtw_8814ae_driver = { - .name = "rtw_8814ae", + .name = KBUILD_MODNAME, .id_table = rtw_8814ae_id_table, .probe = rtw_pci_probe, .remove = rtw_pci_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c index afe045fb84de..1a0886ec17dd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8814au.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c @@ -42,7 +42,7 @@ static const struct usb_device_id rtw_8814au_id_table[] = { MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table); static struct usb_driver rtw_8814au_driver = { - .name = "rtw_8814au", + .name = KBUILD_MODNAME, .id_table = rtw_8814au_id_table, .probe = rtw_usb_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c index f68239b07319..413aec694c33 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c @@ -871,6 +871,7 @@ static const struct rtw_chip_ops rtw8821a_ops = { .set_tx_power_index = rtw88xxa_set_tx_power_index, .cfg_ldo25 = rtw8821a_cfg_ldo25, .efuse_grant = rtw88xxa_efuse_grant, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw88xxa_false_alarm_statistics, .phy_calibration = rtw8821a_phy_calibration, .cck_pd_set = rtw88xxa_phy_cck_pd_set, @@ -1175,6 +1176,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = { .rfe_defs = rtw8821a_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8821a_rfe_defs), .rx_ldpc = false, + .amsdu_in_ampdu = true, .hw_feature_report = false, .c2h_ra_report_size = 4, .old_datarate_fb_limit = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c index a01744b64e8d..28c281b32978 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c @@ -66,7 +66,7 @@ static const struct usb_device_id rtw_8821au_id_table[] = { MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table); static struct usb_driver rtw_8821au_driver = { - .name = "rtw_8821au", + .name = KBUILD_MODNAME, .id_table = rtw_8821au_id_table, .probe = rtw_usb_probe, 
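/*
 * Editor's sketch (not part of the patch): rtw8814a_set_ampdu_factor() above
 * programs the peer's maximum A-MPDU length from the advertised HT/VHT
 * exponent.  Per 802.11 the exponent maps to 2^(13 + factor) - 1 bytes,
 * which is what the (8192 << factor) - 1 expression computes.  The helper
 * below is an illustrative stand-alone version of that conversion; the
 * clamp at 7 (1 MiB - 1) is a generic bound, not the driver's exact limit.
 */
#include <linux/types.h>

static u32 ampdu_factor_to_bytes(u8 factor)
{
        if (factor > 7)
                factor = 7;             /* defensive clamp on the exponent */

        return (8192U << factor) - 1;   /* e.g. factor 3 -> 65535 bytes */
}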
.disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 0ade7f11cbd2..413130a30ca9 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -1668,6 +1668,7 @@ static const struct rtw_chip_ops rtw8821c_ops = { .set_antenna = NULL, .set_tx_power_index = rtw8821c_set_tx_power_index, .cfg_ldo25 = rtw8821c_cfg_ldo25, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw8821c_false_alarm_statistics, .phy_calibration = rtw8821c_phy_calibration, .cck_pd_set = rtw8821c_phy_cck_pd_set, @@ -1990,6 +1991,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .amsdu_in_ampdu = true, .usb_tx_agg_desc_num = 3, .hw_feature_report = true, .c2h_ra_report_size = 7, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c index f3d971feda04..40637c079d99 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c @@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8821ce_id_table[] = { MODULE_DEVICE_TABLE(pci, rtw_8821ce_id_table); static struct pci_driver rtw_8821ce_driver = { - .name = "rtw_8821ce", + .name = KBUILD_MODNAME, .id_table = rtw_8821ce_id_table, .probe = rtw_pci_probe, .remove = rtw_pci_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c index a359413369a4..6d94162213c6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c @@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8821cs_id_table[] = { MODULE_DEVICE_TABLE(sdio, rtw_8821cs_id_table); static struct sdio_driver rtw_8821cs_driver = { - .name = "rtw_8821cs", + .name = KBUILD_MODNAME, .probe = rtw_sdio_probe, .remove = rtw_sdio_remove, .id_table = rtw_8821cs_id_table, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c index a019f4085e73..7a0fffc359e2 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c @@ -48,7 +48,7 @@ static int rtw_8821cu_probe(struct usb_interface *intf, } static struct usb_driver rtw_8821cu_driver = { - .name = "rtw_8821cu", + .name = KBUILD_MODNAME, .id_table = rtw_8821cu_id_table, .probe = rtw_8821cu_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index b4934da88e33..ab199eaea3c7 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2158,6 +2158,7 @@ static const struct rtw_chip_ops rtw8822b_ops = { .set_tx_power_index = rtw8822b_set_tx_power_index, .set_antenna = rtw8822b_set_antenna, .cfg_ldo25 = rtw8822b_cfg_ldo25, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw8822b_false_alarm_statistics, .phy_calibration = rtw8822b_phy_calibration, .pwr_track = rtw8822b_pwr_track, @@ -2531,6 +2532,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .amsdu_in_ampdu = true, .usb_tx_agg_desc_num = 3, .hw_feature_report = true, .c2h_ra_report_size = 7, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c index 4994950776cd..0bb9f70e7920 100644 --- 
a/drivers/net/wireless/realtek/rtw88/rtw8822be.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c @@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8822be_id_table[] = { MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table); static struct pci_driver rtw_8822be_driver = { - .name = "rtw_8822be", + .name = KBUILD_MODNAME, .id_table = rtw_8822be_id_table, .probe = rtw_pci_probe, .remove = rtw_pci_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c index 31d8645f83bd..744781dcb419 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c @@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822bs_id_table[] = { MODULE_DEVICE_TABLE(sdio, rtw_8822bs_id_table); static struct sdio_driver rtw_8822bs_driver = { - .name = "rtw_8822bs", + .name = KBUILD_MODNAME, .probe = rtw_sdio_probe, .remove = rtw_sdio_remove, .id_table = rtw_8822bs_id_table, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c index 572d1f31832e..44e28e583964 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c @@ -77,6 +77,8 @@ static const struct usb_device_id rtw_8822bu_id_table[] = { .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x03d1, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* BUFFALO WI-U2-866DM */ {}, }; MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table); @@ -88,7 +90,7 @@ static int rtw8822bu_probe(struct usb_interface *intf, } static struct usb_driver rtw_8822bu_driver = { - .name = "rtw_8822bu", + .name = KBUILD_MODNAME, .id_table = rtw_8822bu_id_table, .probe = rtw8822bu_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 5e53e0db177e..017d959de3ce 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3951,7 +3951,8 @@ static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev) rtw_write32(rtwdev, REG_NCTL0, 0x00001148); rtw_write32(rtwdev, REG_NCTL0, 0x00001149); - check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55); + if (!check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55)) + rtw_warn(rtwdev, "DPK stuck, performance may be suboptimal"); rtw_write8(rtwdev, 0x1b10, 0x0); rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c); @@ -4968,6 +4969,7 @@ static const struct rtw_chip_ops rtw8822c_ops = { .set_tx_power_index = rtw8822c_set_tx_power_index, .set_antenna = rtw8822c_set_antenna, .cfg_ldo25 = rtw8822c_cfg_ldo25, + .set_ampdu_factor = NULL, .false_alarm_statistics = rtw8822c_false_alarm_statistics, .dpk_track = rtw8822c_dpk_track, .phy_calibration = rtw8822c_phy_calibration, @@ -5349,6 +5351,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x20, + .amsdu_in_ampdu = true, .usb_tx_agg_desc_num = 3, .hw_feature_report = true, .c2h_ra_report_size = 7, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c index e26c6bc82936..9def732480af 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c 
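/*
 * Editor's sketch (not part of the patch): the rtw8822c DPK hunk above now
 * warns when check_hw_ready() times out instead of silently discarding the
 * result.  The underlying pattern is a bounded poll of a register field; a
 * stand-alone version, with a made-up read callback rather than the
 * driver's accessors, might look like this:
 */
#include <linux/types.h>
#include <linux/delay.h>

static bool poll_reg_ready(u32 (*read32)(u32 addr), u32 addr, u32 mask,
                           u32 expect, unsigned int tries)
{
        while (tries--) {
                if ((read32(addr) & mask) == expect)
                        return true;    /* hardware reached the expected state */
                udelay(100);            /* back off between polls */
        }

        return false;                   /* caller decides whether to warn or bail */
}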
@@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8822ce_id_table[] = { MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table); static struct pci_driver rtw_8822ce_driver = { - .name = "rtw_8822ce", + .name = KBUILD_MODNAME, .id_table = rtw_8822ce_id_table, .probe = rtw_pci_probe, .remove = rtw_pci_remove, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c index 975e81c824f2..322281e07eb8 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c @@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822cs_id_table[] = { MODULE_DEVICE_TABLE(sdio, rtw_8822cs_id_table); static struct sdio_driver rtw_8822cs_driver = { - .name = "rtw_8822cs", + .name = KBUILD_MODNAME, .probe = rtw_sdio_probe, .remove = rtw_sdio_remove, .id_table = rtw_8822cs_id_table, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c index 157d5102a4b1..324fd5c8bfd4 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c @@ -32,7 +32,7 @@ static int rtw8822cu_probe(struct usb_interface *intf, } static struct usb_driver rtw_8822cu_driver = { - .name = "rtw_8822cu", + .name = KBUILD_MODNAME, .id_table = rtw_8822cu_id_table, .probe = rtw8822cu_probe, .disconnect = rtw_usb_disconnect, diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index 6209a49312f1..e733ed846123 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -10,6 +10,7 @@ #include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include "main.h" +#include "mac.h" #include "debug.h" #include "fw.h" #include "ps.h" @@ -677,12 +678,22 @@ static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev) { u8 size, timeout; - if (rtw_chip_wcpu_11n(rtwdev)) { + switch (rtwdev->chip->id) { + case RTW_CHIP_TYPE_8703B: + case RTW_CHIP_TYPE_8821A: + case RTW_CHIP_TYPE_8812A: size = 0x6; timeout = 0x6; - } else { + break; + case RTW_CHIP_TYPE_8723D: + size = 0xa; + timeout = 0x3; + rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7)); + break; + default: size = 0xff; timeout = 0x1; + break; } /* Make the firmware honor the size limit configured below */ @@ -718,10 +729,7 @@ static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb, case RTW_TX_QUEUE_H2C: return TX_DESC_QSEL_H2C; case RTW_TX_QUEUE_MGMT: - if (rtw_chip_wcpu_11n(rtwdev)) - return TX_DESC_QSEL_HIGH; - else - return TX_DESC_QSEL_MGMT; + return TX_DESC_QSEL_MGMT; case RTW_TX_QUEUE_HI0: return TX_DESC_QSEL_HIGH; default: @@ -1157,6 +1165,7 @@ static const struct rtw_hci_ops rtw_sdio_ops = { .link_ps = rtw_sdio_link_ps, .interface_cfg = rtw_sdio_interface_cfg, .dynamic_rx_agg = NULL, + .write_firmware_page = rtw_write_firmware_page, .read8 = rtw_sdio_read8, .read16 = rtw_sdio_read16, @@ -1227,10 +1236,7 @@ static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev, return; } - if (queue <= RTW_TX_QUEUE_VO) - rtw_sdio_indicate_tx_status(rtwdev, skb); - else - dev_kfree_skb_any(skb); + rtw_sdio_indicate_tx_status(rtwdev, skb); } static void rtw_sdio_tx_handler(struct work_struct *work) @@ -1298,7 +1304,6 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev) struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv; int i; - flush_workqueue(rtwsdio->txwq); destroy_workqueue(rtwsdio->txwq); kfree(rtwsdio->tx_handler_data); diff --git 
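/* Sketch of the per-chip RX aggregation tuning added to sdio.c above: the
 * driver now selects the DMA aggregation size/timeout per chip ID instead of
 * the previous two-way wcpu_11n split. The enum names are shortened and the
 * extra 8723D register tweak is left out; the chosen numbers mirror the hunk,
 * everything else is illustrative. */
#include <stdio.h>

enum chip_id { CHIP_8703B, CHIP_8723D, CHIP_8821A, CHIP_8812A, CHIP_8822B };

struct rx_agg_cfg { unsigned char size, timeout; };

static struct rx_agg_cfg pick_rx_agg(enum chip_id id)
{
	switch (id) {
	case CHIP_8703B:
	case CHIP_8821A:
	case CHIP_8812A:
		return (struct rx_agg_cfg){ .size = 0x6, .timeout = 0x6 };
	case CHIP_8723D:
		return (struct rx_agg_cfg){ .size = 0xa, .timeout = 0x3 };
	default:
		return (struct rx_agg_cfg){ .size = 0xff, .timeout = 0x1 };
	}
}

int main(void)
{
	struct rx_agg_cfg cfg = pick_rx_agg(CHIP_8723D);

	printf("agg size 0x%x, timeout 0x%x\n", cfg.size, cfg.timeout);
	return 0;
}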
a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index c8092fa0d9f1..3b5126ffc81a 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -139,7 +139,7 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len) ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE, - addr, 0, data, len, 30000); + addr, 0, data, len, 500); if (ret < 0 && ret != -ENODEV && count++ < 4) rtw_err(rtwdev, "write register 0x%x failed with %d\n", addr, ret); @@ -165,6 +165,60 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) rtw_usb_write(rtwdev, addr, val, 4); } +static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page, + const u8 *data, u32 size) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + struct usb_device *udev = rtwusb->udev; + u32 addr = FW_START_ADDR_LEGACY; + u8 *data_dup, *buf; + u32 n, block_size; + int ret; + + switch (rtwdev->chip->id) { + case RTW_CHIP_TYPE_8723D: + block_size = 254; + break; + default: + block_size = 196; + break; + } + + data_dup = kmemdup(data, size, GFP_KERNEL); + if (!data_dup) + return; + + buf = data_dup; + + rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page); + + while (size > 0) { + if (size >= block_size) + n = block_size; + else if (size >= 8) + n = 8; + else + n = 1; + + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE, + addr, 0, buf, n, 500); + if (ret != n) { + if (ret != -ENODEV) + rtw_err(rtwdev, + "write 0x%x len %d failed: %d\n", + addr, n, ret); + break; + } + + addr += n; + buf += n; + size -= n; + } + + kfree(data_dup); +} + static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping) { switch (dma_mapping) { @@ -866,6 +920,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) case RTW_CHIP_TYPE_8822C: case RTW_CHIP_TYPE_8822B: case RTW_CHIP_TYPE_8821C: + case RTW_CHIP_TYPE_8814A: rtw_usb_dynamic_rx_agg_v1(rtwdev, enable); break; case RTW_CHIP_TYPE_8821A: @@ -891,6 +946,7 @@ static const struct rtw_hci_ops rtw_usb_ops = { .link_ps = rtw_usb_link_ps, .interface_cfg = rtw_usb_interface_cfg, .dynamic_rx_agg = rtw_usb_dynamic_rx_agg, + .write_firmware_page = rtw_usb_write_firmware_page, .write8 = rtw_usb_write8, .write16 = rtw_usb_write16, @@ -948,7 +1004,6 @@ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev) skb_queue_purge(&rtwusb->rx_queue); - flush_workqueue(rtwusb->rxwq); destroy_workqueue(rtwusb->rxwq); skb_queue_purge(&rtwusb->rx_free_queue); @@ -977,7 +1032,6 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev) { struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); - flush_workqueue(rtwusb->txwq); destroy_workqueue(rtwusb->txwq); rtw_usb_tx_queue_purge(rtwusb); } @@ -1094,7 +1148,8 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev) static bool rtw_usb3_chip_old(u8 chip_id) { - return chip_id == RTW_CHIP_TYPE_8812A; + return chip_id == RTW_CHIP_TYPE_8812A || + chip_id == RTW_CHIP_TYPE_8814A; } static bool rtw_usb3_chip_new(u8 chip_id) diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c index f5dedb12c129..581d6d4154d3 100644 --- a/drivers/net/wireless/realtek/rtw89/acpi.c +++ b/drivers/net/wireless/realtek/rtw89/acpi.c @@ -12,6 +12,121 @@ static const guid_t rtw89_guid = GUID_INIT(0xD2A8C3E8, 0x4B69, 0x4F00, 0x82, 0xBD, 0xFE, 0x86, 0x07, 0x80, 0x3A, 0xA7); +static u32 rtw89_acpi_traversal_object(struct rtw89_dev *rtwdev, + const 
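/* Sketch of the transfer-size policy in rtw_usb_write_firmware_page() above:
 * a firmware page is streamed in the largest chunk the chip accepts (254 or
 * 196 bytes in the hunk), falling back to 8-byte and then single-byte writes
 * for the tail. Userspace model of just the chunking arithmetic; no USB I/O
 * happens here and the start address is an arbitrary example value. */
#include <stdio.h>

static unsigned int next_chunk(unsigned int remaining, unsigned int block_size)
{
	if (remaining >= block_size)
		return block_size;
	if (remaining >= 8)
		return 8;
	return 1;
}

int main(void)
{
	unsigned int remaining = 1000, addr = 0x1000;	/* illustrative sizes */
	const unsigned int block_size = 196;

	while (remaining > 0) {
		unsigned int n = next_chunk(remaining, block_size);

		printf("control write: addr 0x%x, len %u\n", addr, n);
		addr += n;
		remaining -= n;
	}
	return 0;
}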
union acpi_object *obj, u8 *pos) +{ + const union acpi_object *elm; + unsigned int i; + u32 sub_len; + u32 len = 0; + u8 *tmp; + + switch (obj->type) { + case ACPI_TYPE_INTEGER: + if (pos) + pos[len] = obj->integer.value; + + len++; + break; + case ACPI_TYPE_BUFFER: + if (unlikely(obj->buffer.length == 0)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "%s: invalid buffer type\n", __func__); + goto err; + } + + if (pos) + memcpy(pos, obj->buffer.pointer, obj->buffer.length); + + len += obj->buffer.length; + break; + case ACPI_TYPE_PACKAGE: + if (unlikely(obj->package.count == 0)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "%s: invalid package type\n", __func__); + goto err; + } + + for (i = 0; i < obj->package.count; i++) { + elm = &obj->package.elements[i]; + tmp = pos ? pos + len : NULL; + + sub_len = rtw89_acpi_traversal_object(rtwdev, elm, tmp); + if (unlikely(sub_len == 0)) + goto err; + + len += sub_len; + } + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: unhandled type: %d\n", + __func__, obj->type); + goto err; + } + + return len; + +err: + return 0; +} + +static u32 rtw89_acpi_calculate_object_length(struct rtw89_dev *rtwdev, + const union acpi_object *obj) +{ + return rtw89_acpi_traversal_object(rtwdev, obj, NULL); +} + +static struct rtw89_acpi_data * +rtw89_acpi_evaluate_method(struct rtw89_dev *rtwdev, const char *method) +{ + struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; + struct rtw89_acpi_data *data = NULL; + acpi_handle root, handle; + union acpi_object *obj; + acpi_status status; + u32 len; + + root = ACPI_HANDLE(rtwdev->dev); + if (!root) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi (%s): failed to get root\n", method); + return NULL; + } + + status = acpi_get_handle(root, (acpi_string)method, &handle); + if (ACPI_FAILURE(status)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi (%s): failed to get handle\n", method); + return NULL; + } + + status = acpi_evaluate_object(handle, NULL, NULL, &buf); + if (ACPI_FAILURE(status)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi (%s): failed to evaluate object\n", method); + return NULL; + } + + obj = buf.pointer; + len = rtw89_acpi_calculate_object_length(rtwdev, obj); + if (unlikely(len == 0)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi (%s): failed to traversal obj len\n", method); + goto out; + } + + data = kzalloc(struct_size(data, buf, len), GFP_KERNEL); + if (!data) + goto out; + + data->len = len; + rtw89_acpi_traversal_object(rtwdev, obj, data->buf); + +out: + ACPI_FREE(obj); + return data; +} + static int rtw89_acpi_dsm_get_value(struct rtw89_dev *rtwdev, union acpi_object *obj, u8 *value) @@ -121,6 +236,49 @@ int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev, return 0; } +static bool chk_acpi_policy_tas_sig(const struct rtw89_acpi_policy_tas *p) +{ + return p->signature[0] == 0x52 && + p->signature[1] == 0x54 && + p->signature[2] == 0x4B && + p->signature[3] == 0x05; +} + +static int rtw89_acpi_dsm_get_policy_tas(struct rtw89_dev *rtwdev, + union acpi_object *obj, + struct rtw89_acpi_policy_tas **policy) +{ + const struct rtw89_acpi_policy_tas *ptr; + u32 buf_len; + + if (obj->type != ACPI_TYPE_BUFFER) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi: expect buffer but type: %d\n", obj->type); + return -EINVAL; + } + + buf_len = obj->buffer.length; + if (buf_len < sizeof(*ptr)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n", + __func__, buf_len); + return -EINVAL; + } + + ptr = (typeof(ptr))obj->buffer.pointer; + if (!chk_acpi_policy_tas_sig(ptr)) { + 
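/* Sketch of the two-pass flattening used by rtw89_acpi_traversal_object()
 * above: the same walker first runs with a NULL destination to size the
 * output, the buffer is then allocated, and the walker runs again to fill it.
 * A toy tree stands in for ACPI objects; the shape of the algorithm is the
 * point, not the types. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	int nr_children;
	const struct node *children;
};

static unsigned int flatten(const struct node *n, unsigned char *pos)
{
	unsigned int len = 0;

	if (pos)
		pos[len] = (unsigned char)n->value;
	len++;

	for (int i = 0; i < n->nr_children; i++)
		len += flatten(&n->children[i], pos ? pos + len : NULL);

	return len;
}

int main(void)
{
	const struct node leaves[] = { { .value = 2 }, { .value = 3 } };
	const struct node root = { .value = 1, .nr_children = 2, .children = leaves };
	unsigned int len = flatten(&root, NULL);	/* pass 1: size only */
	unsigned char *buf = malloc(len);

	if (!buf)
		return 1;
	flatten(&root, buf);				/* pass 2: fill */
	printf("flattened %u bytes: %d %d %d\n", len, buf[0], buf[1], buf[2]);
	free(buf);
	return 0;
}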
rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__); + return -EINVAL; + } + + *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL); + if (!*policy) + return -ENOMEM; + + rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_tas: ", *policy, + sizeof(*ptr)); + return 0; +} + int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, enum rtw89_acpi_dsm_func func, struct rtw89_acpi_dsm_result *res) @@ -142,6 +300,8 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP) ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj, &res->u.policy_6ghz_sp); + else if (func == RTW89_ACPI_DSM_FUNC_TAS_EN) + ret = rtw89_acpi_dsm_get_policy_tas(rtwdev, obj, &res->u.policy_tas); else ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value); @@ -152,46 +312,875 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev, struct rtw89_acpi_rtag_result *res) { - struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_handle root, handle; - union acpi_object *obj; - acpi_status status; + const struct rtw89_acpi_data *data; u32 buf_len; int ret = 0; - root = ACPI_HANDLE(rtwdev->dev); - if (!root) - return -EOPNOTSUPP; - - status = acpi_get_handle(root, (acpi_string)"RTAG", &handle); - if (ACPI_FAILURE(status)) + data = rtw89_acpi_evaluate_method(rtwdev, "RTAG"); + if (!data) return -EIO; - status = acpi_evaluate_object(handle, NULL, NULL, &buf); - if (ACPI_FAILURE(status)) - return -EIO; + buf_len = data->len; + if (buf_len != sizeof(*res)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n", + __func__, buf_len); + ret = -EINVAL; + goto out; + } - obj = buf.pointer; - if (obj->type != ACPI_TYPE_BUFFER) { + *res = *(struct rtw89_acpi_rtag_result *)data->buf; + + rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res)); + +out: + kfree(data); + return ret; +} + +enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev, + u32 center_freq) +{ + switch (center_freq) { + default: rtw89_debug(rtwdev, RTW89_DBG_ACPI, - "acpi: expect buffer but type: %d\n", obj->type); - ret = -EINVAL; + "center freq %u to ACPI SAR subband is unhandled\n", + center_freq); + fallthrough; + case 2412 ... 2484: + return RTW89_ACPI_SAR_2GHZ_SUBBAND; + case 5180 ... 5240: + return RTW89_ACPI_SAR_5GHZ_SUBBAND_1; + case 5250 ... 5320: + return RTW89_ACPI_SAR_5GHZ_SUBBAND_2; + case 5500 ... 5720: + return RTW89_ACPI_SAR_5GHZ_SUBBAND_2E; + case 5745 ... 5885: + return RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4; + case 5955 ... 6155: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L; + case 6175 ... 6415: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H; + case 6435 ... 6515: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_6; + case 6535 ... 6695: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L; + case 6715 ... 6855: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H; + + /* freq 6875 (ch 185, 20MHz) spans RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H + * and RTW89_ACPI_SAR_6GHZ_SUBBAND_8, so directly describe it with + * struct rtw89_6ghz_span. + */ + + case 6895 ... 
7115: + return RTW89_ACPI_SAR_6GHZ_SUBBAND_8; + } +} + +enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev, + enum rtw89_acpi_sar_subband subband) +{ + switch (subband) { + default: + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "ACPI SAR subband %u to band is unhandled\n", subband); + fallthrough; + case RTW89_ACPI_SAR_2GHZ_SUBBAND: + return RTW89_BAND_2G; + case RTW89_ACPI_SAR_5GHZ_SUBBAND_1: + return RTW89_BAND_5G; + case RTW89_ACPI_SAR_5GHZ_SUBBAND_2: + return RTW89_BAND_5G; + case RTW89_ACPI_SAR_5GHZ_SUBBAND_2E: + return RTW89_BAND_5G; + case RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4: + return RTW89_BAND_5G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L: + return RTW89_BAND_6G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H: + return RTW89_BAND_6G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_6: + return RTW89_BAND_6G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L: + return RTW89_BAND_6G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H: + return RTW89_BAND_6G; + case RTW89_ACPI_SAR_6GHZ_SUBBAND_8: + return RTW89_BAND_6G; + } +} + +static u8 rtw89_acpi_sar_rfpath_to_hp_antidx(enum rtw89_rf_path rfpath) +{ + switch (rfpath) { + default: + case RF_PATH_B: + return 0; + case RF_PATH_A: + return 1; + } +} + +static u8 rtw89_acpi_sar_rfpath_to_rt_antidx(enum rtw89_rf_path rfpath) +{ + switch (rfpath) { + default: + case RF_PATH_A: + return 0; + case RF_PATH_B: + return 1; + } +} + +static s16 rtw89_acpi_sar_normalize_hp_val(u8 v) +{ + static const u8 bias = 10; + static const u8 fct = 1; + u16 res; + + BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR); + + res = (bias << TXPWR_FACTOR_OF_RTW89_ACPI_SAR) + + (v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct)); + + return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR); +} + +static s16 rtw89_acpi_sar_normalize_rt_val(u8 v) +{ + static const u8 fct = 3; + u16 res; + + BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR); + + res = v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct); + + return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR); +} + +static +void rtw89_acpi_sar_load_std_legacy(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_recognition *rec, + const void *content, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_sar_std_legacy *ptr = content; + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) { + u8 antidx = rec->rfpath_to_antidx(path); + + if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY) + ent->v[subband][path] = + rec->normalize(ptr->v[antidx][subband]); + else + ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR; + } + } +} + +static +void rtw89_acpi_sar_load_std_has_6ghz(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_recognition *rec, + const void *content, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_sar_std_has_6ghz *ptr = content; + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + + BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND); + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) { + u8 antidx = rec->rfpath_to_antidx(path); + + ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]); + } + } +} + +static +void rtw89_acpi_sar_load_sml_legacy(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_recognition *rec, + const void *content, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_sar_sml_legacy *ptr = 
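/* Sketch of the fixed-point conversion in the normalize helpers above: a raw
 * ACPI table value with resolution 1/2^fct of the driver unit is rescaled by
 * shifting left by (FACTOR - fct), optionally adding a bias, then clamping to
 * a maximum; fct must not exceed FACTOR (the driver enforces this with
 * BUILD_BUG_ON). FACTOR, the bias and the clamp below are stand-ins, not the
 * driver's real constants. */
#include <stdio.h>

#define FACTOR	3		/* assumed driver fixed-point factor */
#define MAX_VAL	(30 << FACTOR)	/* assumed upper clamp */

static short normalize(unsigned char v, unsigned char fct, unsigned char bias)
{
	int res = (bias << FACTOR) + (v << (FACTOR - fct));

	return res > MAX_VAL ? MAX_VAL : res;
}

int main(void)
{
	/* HP-style table entry: fct = 1 with a bias of 10 */
	printf("hp 0x%02x -> %d\n", 16, normalize(16, 1, 10));
	/* RT-style table entry: fct = 3, no bias */
	printf("rt 0x%02x -> %d\n", 144, normalize(144, 3, 0));
	return 0;
}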
content; + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) { + u8 antidx = rec->rfpath_to_antidx(path); + + if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY) + ent->v[subband][path] = + rec->normalize(ptr->v[antidx][subband]); + else + ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR; + } + } +} + +static +void rtw89_acpi_sar_load_sml_has_6ghz(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_recognition *rec, + const void *content, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_sar_sml_has_6ghz *ptr = content; + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + + BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND); + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) { + u8 antidx = rec->rfpath_to_antidx(path); + + ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]); + } + } +} + +static s16 rtw89_acpi_geo_sar_normalize_delta(s8 delta) +{ + static const u8 fct = 1; + + BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR); + + return delta << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct); +} + +static enum rtw89_acpi_geo_sar_regd_hp +rtw89_acpi_geo_sar_regd_convert_hp_idx(enum rtw89_regulation_type regd) +{ + switch (regd) { + case RTW89_FCC: + case RTW89_IC: + case RTW89_NCC: + case RTW89_CHILE: + case RTW89_MEXICO: + return RTW89_ACPI_GEO_SAR_REGD_HP_FCC; + case RTW89_ETSI: + case RTW89_MKK: + case RTW89_ACMA: + return RTW89_ACPI_GEO_SAR_REGD_HP_ETSI; + default: + case RTW89_WW: + case RTW89_NA: + case RTW89_KCC: + return RTW89_ACPI_GEO_SAR_REGD_HP_WW; + } +} + +static enum rtw89_acpi_geo_sar_regd_rt +rtw89_acpi_geo_sar_regd_convert_rt_idx(enum rtw89_regulation_type regd) +{ + switch (regd) { + case RTW89_FCC: + case RTW89_NCC: + case RTW89_CHILE: + case RTW89_MEXICO: + return RTW89_ACPI_GEO_SAR_REGD_RT_FCC; + case RTW89_ETSI: + case RTW89_ACMA: + return RTW89_ACPI_GEO_SAR_REGD_RT_ETSI; + case RTW89_MKK: + return RTW89_ACPI_GEO_SAR_REGD_RT_MKK; + case RTW89_IC: + return RTW89_ACPI_GEO_SAR_REGD_RT_IC; + case RTW89_KCC: + return RTW89_ACPI_GEO_SAR_REGD_RT_KCC; + default: + case RTW89_WW: + case RTW89_NA: + return RTW89_ACPI_GEO_SAR_REGD_RT_WW; + } +} + +static +void rtw89_acpi_geo_sar_load_by_hp(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_geo_sar_hp_val *ptr, + enum rtw89_rf_path path, s16 *val) +{ + u8 antidx = rtw89_acpi_sar_rfpath_to_hp_antidx(path); + s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta[antidx]); + s16 max = rtw89_acpi_sar_normalize_hp_val(ptr->max); + + *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max); +} + +static +void rtw89_acpi_geo_sar_load_by_rt(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_geo_sar_rt_val *ptr, + s16 *val) +{ + s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta); + s16 max = rtw89_acpi_sar_normalize_rt_val(ptr->max); + + *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max); +} + +static +void rtw89_acpi_geo_sar_load_hp_legacy(struct rtw89_dev *rtwdev, + const void *content, + enum rtw89_regulation_type regd, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_geo_sar_hp_legacy *ptr = content; + const struct rtw89_acpi_geo_sar_hp_legacy_entry *ptr_ent; + const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val; + enum rtw89_acpi_geo_sar_regd_hp geo_idx = + 
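/* Sketch of the geo-SAR adjustment applied by the load_by_hp/load_by_rt
 * helpers above: a signed per-region delta is added to the base value and the
 * result is clamped between a global minimum and the region's own maximum.
 * The constants are placeholders. */
#include <stdio.h>

#define SAR_MIN	0	/* placeholder for the driver's minimum */

static int apply_geo_delta(int base, int delta, int region_max)
{
	int v = base + delta;

	if (v < SAR_MIN)
		v = SAR_MIN;
	if (v > region_max)
		v = region_max;
	return v;
}

int main(void)
{
	printf("%d\n", apply_geo_delta(120, -16, 160));	/* 104 */
	printf("%d\n", apply_geo_delta(150, 32, 160));	/* clamped to 160 */
	return 0;
}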
rtw89_acpi_geo_sar_regd_convert_hp_idx(regd); + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + enum rtw89_band band; + + ptr_ent = &ptr->entries[geo_idx]; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + band = rtw89_acpi_sar_subband_to_band(rtwdev, subband); + switch (band) { + case RTW89_BAND_2G: + ptr_ent_val = &ptr_ent->val_2ghz; + break; + case RTW89_BAND_5G: + ptr_ent_val = &ptr_ent->val_5ghz; + break; + default: + case RTW89_BAND_6G: + ptr_ent_val = NULL; + break; + } + + if (!ptr_ent_val) + continue; + + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) + rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path, + &ent->v[subband][path]); + } +} + +static +void rtw89_acpi_geo_sar_load_hp_has_6ghz(struct rtw89_dev *rtwdev, + const void *content, + enum rtw89_regulation_type regd, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_geo_sar_hp_has_6ghz *ptr = content; + const struct rtw89_acpi_geo_sar_hp_has_6ghz_entry *ptr_ent; + const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val; + enum rtw89_acpi_geo_sar_regd_hp geo_idx = + rtw89_acpi_geo_sar_regd_convert_hp_idx(regd); + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + enum rtw89_band band; + + ptr_ent = &ptr->entries[geo_idx]; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + band = rtw89_acpi_sar_subband_to_band(rtwdev, subband); + switch (band) { + case RTW89_BAND_2G: + ptr_ent_val = &ptr_ent->val_2ghz; + break; + case RTW89_BAND_5G: + ptr_ent_val = &ptr_ent->val_5ghz; + break; + case RTW89_BAND_6G: + ptr_ent_val = &ptr_ent->val_6ghz; + break; + default: + ptr_ent_val = NULL; + break; + } + + if (!ptr_ent_val) + continue; + + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) + rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path, + &ent->v[subband][path]); + } +} + +static +void rtw89_acpi_geo_sar_load_rt_legacy(struct rtw89_dev *rtwdev, + const void *content, + enum rtw89_regulation_type regd, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_geo_sar_rt_legacy *ptr = content; + const struct rtw89_acpi_geo_sar_rt_legacy_entry *ptr_ent; + const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val; + enum rtw89_acpi_geo_sar_regd_rt geo_idx = + rtw89_acpi_geo_sar_regd_convert_rt_idx(regd); + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + enum rtw89_band band; + + ptr_ent = &ptr->entries[geo_idx]; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + band = rtw89_acpi_sar_subband_to_band(rtwdev, subband); + switch (band) { + case RTW89_BAND_2G: + ptr_ent_val = &ptr_ent->val_2ghz; + break; + case RTW89_BAND_5G: + ptr_ent_val = &ptr_ent->val_5ghz; + break; + default: + case RTW89_BAND_6G: + ptr_ent_val = NULL; + break; + } + + if (!ptr_ent_val) + continue; + + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) + rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val, + &ent->v[subband][path]); + } +} + +static +void rtw89_acpi_geo_sar_load_rt_has_6ghz(struct rtw89_dev *rtwdev, + const void *content, + enum rtw89_regulation_type regd, + struct rtw89_sar_entry_from_acpi *ent) +{ + const struct rtw89_acpi_geo_sar_rt_has_6ghz *ptr = content; + const struct rtw89_acpi_geo_sar_rt_has_6ghz_entry *ptr_ent; + const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val; + enum rtw89_acpi_geo_sar_regd_rt geo_idx = + rtw89_acpi_geo_sar_regd_convert_rt_idx(regd); + enum rtw89_acpi_sar_subband subband; + enum rtw89_rf_path path; + enum rtw89_band band; + + 
ptr_ent = &ptr->entries[geo_idx]; + + for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) { + band = rtw89_acpi_sar_subband_to_band(rtwdev, subband); + switch (band) { + case RTW89_BAND_2G: + ptr_ent_val = &ptr_ent->val_2ghz; + break; + case RTW89_BAND_5G: + ptr_ent_val = &ptr_ent->val_5ghz; + break; + case RTW89_BAND_6G: + ptr_ent_val = &ptr_ent->val_6ghz; + break; + default: + ptr_ent_val = NULL; + break; + } + + if (!ptr_ent_val) + continue; + + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) + rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val, + &ent->v[subband][path]); + } +} + +#define RTW89_ACPI_GEO_SAR_DECL_HANDLER(type) \ +static const struct rtw89_acpi_geo_sar_handler \ +rtw89_acpi_geo_sar_handler_ ## type = { \ + .data_size = RTW89_ACPI_GEO_SAR_SIZE_OF(type), \ + .load = rtw89_acpi_geo_sar_load_ ## type, \ +} + +RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_legacy); +RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_has_6ghz); +RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_legacy); +RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_has_6ghz); + +static const struct rtw89_acpi_sar_recognition rtw89_acpi_sar_recs[] = { + { + .id = { + .cid = RTW89_ACPI_SAR_CID_HP, + .rev = RTW89_ACPI_SAR_REV_LEGACY, + .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy), + }, + .geo = &rtw89_acpi_geo_sar_handler_hp_legacy, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx, + .normalize = rtw89_acpi_sar_normalize_hp_val, + .load = rtw89_acpi_sar_load_std_legacy, + }, + { + .id = { + .cid = RTW89_ACPI_SAR_CID_HP, + .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ, + .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz), + }, + .geo = &rtw89_acpi_geo_sar_handler_hp_has_6ghz, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx, + .normalize = rtw89_acpi_sar_normalize_hp_val, + .load = rtw89_acpi_sar_load_std_has_6ghz, + }, + { + .id = { + .cid = RTW89_ACPI_SAR_CID_RT, + .rev = RTW89_ACPI_SAR_REV_LEGACY, + .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy), + }, + .geo = &rtw89_acpi_geo_sar_handler_rt_legacy, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx, + .normalize = rtw89_acpi_sar_normalize_rt_val, + .load = rtw89_acpi_sar_load_std_legacy, + }, + { + .id = { + .cid = RTW89_ACPI_SAR_CID_RT, + .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ, + .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz), + }, + .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx, + .normalize = rtw89_acpi_sar_normalize_rt_val, + .load = rtw89_acpi_sar_load_std_has_6ghz, + }, + { + .id = { + .cid = RTW89_ACPI_SAR_CID_RT, + .rev = RTW89_ACPI_SAR_REV_LEGACY, + .size = RTW89_ACPI_SAR_SIZE_OF(sml_legacy), + }, + .geo = &rtw89_acpi_geo_sar_handler_rt_legacy, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx, + .normalize = rtw89_acpi_sar_normalize_rt_val, + .load = rtw89_acpi_sar_load_sml_legacy, + }, + { + .id = { + .cid = RTW89_ACPI_SAR_CID_RT, + .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ, + .size = RTW89_ACPI_SAR_SIZE_OF(sml_has_6ghz), + }, + .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz, + + .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx, + .normalize = rtw89_acpi_sar_normalize_rt_val, + .load = rtw89_acpi_sar_load_sml_has_6ghz, + }, +}; + +struct rtw89_acpi_sar_rec_parm { + u32 pld_len; + u8 tbl_cnt; + u16 cid; + u8 rev; +}; + +static const struct rtw89_acpi_sar_recognition * +rtw89_acpi_sar_recognize(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_rec_parm *parm) +{ + const u32 tbl_len = parm->pld_len / parm->tbl_cnt; + const struct rtw89_acpi_sar_recognition *rec; + struct rtw89_acpi_sar_identifier id = 
{}; + + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "%s: cid %u, rev %u, tbl len %u, tbl cnt %u\n", + __func__, parm->cid, parm->rev, tbl_len, parm->tbl_cnt); + + if (unlikely(parm->pld_len % parm->tbl_cnt)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid pld len %u\n", + parm->pld_len); + return NULL; + } + + if (unlikely(tbl_len > RTW89_ACPI_SAR_SIZE_MAX)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl len %u\n", + tbl_len); + return NULL; + } + + if (unlikely(parm->tbl_cnt > MAX_NUM_OF_RTW89_ACPI_SAR_TBL)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl cnt %u\n", + parm->tbl_cnt); + return NULL; + } + + switch (parm->cid) { + case RTW89_ACPI_SAR_CID_HP: + case RTW89_ACPI_SAR_CID_RT: + id.cid = parm->cid; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid cid 0x%x\n", + parm->cid); + return NULL; + } + + switch (parm->rev) { + case RTW89_ACPI_SAR_REV_LEGACY: + case RTW89_ACPI_SAR_REV_HAS_6GHZ: + id.rev = parm->rev; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid rev %u\n", + parm->rev); + return NULL; + } + + id.size = tbl_len; + for (unsigned int i = 0; i < ARRAY_SIZE(rtw89_acpi_sar_recs); i++) { + rec = &rtw89_acpi_sar_recs[i]; + if (memcmp(&rec->id, &id, sizeof(rec->id)) == 0) + return rec; + } + + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "failed to recognize\n"); + return NULL; +} + +static const struct rtw89_acpi_sar_recognition * +rtw89_acpi_evaluate_static_sar(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg) +{ + const struct rtw89_acpi_sar_recognition *rec = NULL; + const struct rtw89_acpi_static_sar_hdr *hdr; + struct rtw89_sar_entry_from_acpi tmp = {}; + struct rtw89_acpi_sar_rec_parm parm = {}; + struct rtw89_sar_table_from_acpi *tbl; + const struct rtw89_acpi_data *data; + u32 len; + + data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_STATIC_SAR); + if (!data) + return NULL; + + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load static sar\n"); + + len = data->len; + if (len <= sizeof(*hdr)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len); goto out; } - buf_len = obj->buffer.length; - if (buf_len != sizeof(*res)) { - rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n", - __func__, buf_len); + hdr = (typeof(hdr))data->buf; + + parm.cid = le16_to_cpu(hdr->cid); + parm.rev = hdr->rev; + parm.tbl_cnt = 1; + parm.pld_len = len - sizeof(*hdr); + + rec = rtw89_acpi_sar_recognize(rtwdev, &parm); + if (!rec) + goto out; + + rec->load(rtwdev, rec, hdr->content, &tmp); + + tbl = &cfg->tables[0]; + for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++) + tbl->entries[regd] = tmp; + + cfg->valid_num = 1; + +out: + kfree(data); + return rec; +} + +static const struct rtw89_acpi_sar_recognition * +rtw89_acpi_evaluate_dynamic_sar(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg) +{ + const struct rtw89_acpi_sar_recognition *rec = NULL; + const struct rtw89_acpi_dynamic_sar_hdr *hdr; + struct rtw89_acpi_sar_rec_parm parm = {}; + struct rtw89_sar_table_from_acpi *tbl; + const struct rtw89_acpi_data *data; + u32 len; + + data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR); + if (!data) + return NULL; + + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar\n"); + + len = data->len; + if (len <= sizeof(*hdr)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len); + goto out; + } + + hdr = (typeof(hdr))data->buf; + + parm.cid = le16_to_cpu(hdr->cid); + parm.rev = hdr->rev; + parm.tbl_cnt = hdr->cnt; + parm.pld_len = len - sizeof(*hdr); + + rec = 
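/* Sketch of the table-recognition step in rtw89_acpi_sar_recognize() above:
 * the payload length must split evenly into the advertised table count, and
 * the resulting (cid, rev, per-table size) triple is matched against a list
 * of known layouts. The cid values mirror the enum added in acpi.h further
 * below; the sizes and everything else are invented for the example. */
#include <stdio.h>

struct layout_id { unsigned short cid; unsigned char rev; unsigned char size; };

static const struct layout_id known[] = {
	{ .cid = 0x5048, .rev = 1, .size = 40 },	/* sizes are made up */
	{ .cid = 0x5452, .rev = 2, .size = 44 },
};

static const struct layout_id *recognize(unsigned short cid, unsigned char rev,
					 unsigned int pld_len, unsigned int tbl_cnt)
{
	unsigned int tbl_len;

	if (!tbl_cnt || pld_len % tbl_cnt)
		return NULL;		/* payload must split evenly into tables */

	tbl_len = pld_len / tbl_cnt;
	for (unsigned int i = 0; i < sizeof(known) / sizeof(known[0]); i++)
		if (known[i].cid == cid && known[i].rev == rev &&
		    known[i].size == tbl_len)
			return &known[i];
	return NULL;			/* unknown layout, reject the blob */
}

int main(void)
{
	printf("%s\n", recognize(0x5048, 1, 40, 1) ? "recognized" : "unknown");
	printf("%s\n", recognize(0x5452, 2, 90, 2) ? "recognized" : "unknown");
	return 0;
}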
rtw89_acpi_sar_recognize(rtwdev, &parm); + if (!rec) + goto out; + + for (unsigned int i = 0; i < hdr->cnt; i++) { + const u8 *content = hdr->content + rec->id.size * i; + struct rtw89_sar_entry_from_acpi tmp = {}; + + rec->load(rtwdev, rec, content, &tmp); + + tbl = &cfg->tables[i]; + for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++) + tbl->entries[regd] = tmp; + } + + cfg->valid_num = hdr->cnt; + +out: + kfree(data); + return rec; +} + +int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg, + bool *poll_changed) +{ + struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator; + struct rtw89_sar_indicator_from_acpi tmp = *ind; + const struct rtw89_acpi_data *data; + const u8 *tbl_base1_by_ant; + enum rtw89_rf_path path; + int ret = 0; + u32 len; + + data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR); + if (!data) + return -EFAULT; + + if (!poll_changed) + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar indicator\n"); + + len = data->len; + if (len != ind->fields) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len); ret = -EINVAL; goto out; } - *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer; + tbl_base1_by_ant = data->buf; - rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res)); + for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) { + u8 antidx = ind->rfpath_to_antidx(path); + u8 sel; + + if (antidx >= ind->fields) + antidx = 0; + + /* convert the table index from 1-based to 0-based */ + sel = tbl_base1_by_ant[antidx] - 1; + if (sel >= cfg->valid_num) + sel = 0; + + tmp.tblsel[path] = sel; + } + + if (memcmp(ind, &tmp, sizeof(*ind)) == 0) { + if (poll_changed) + *poll_changed = false; + } else { + if (poll_changed) + *poll_changed = true; + + *ind = tmp; + } out: - ACPI_FREE(obj); + kfree(data); return ret; } + +static +void rtw89_acpi_evaluate_geo_sar(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_geo_sar_handler *hdl, + struct rtw89_sar_cfg_acpi *cfg) +{ + const struct rtw89_acpi_data *data; + u32 len; + + data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_GEO_SAR); + if (!data) + return; + + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load geo sar\n"); + + len = data->len; + if (len != hdl->data_size) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u (expected %u)\n", + len, hdl->data_size); + goto out; + } + + for (unsigned int i = 0; i < cfg->valid_num; i++) + for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++) + hdl->load(rtwdev, data->buf, regd, &cfg->tables[i].entries[regd]); + +out: + kfree(data); +} + +int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg) +{ + struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator; + const struct rtw89_acpi_sar_recognition *rec; + bool fetch_indicator = false; + int ret; + + rec = rtw89_acpi_evaluate_static_sar(rtwdev, cfg); + if (rec) + goto recognized; + + rec = rtw89_acpi_evaluate_dynamic_sar(rtwdev, cfg); + if (!rec) + return -ENOENT; + + fetch_indicator = true; + +recognized: + rtw89_acpi_evaluate_geo_sar(rtwdev, rec->geo, cfg); + + switch (rec->id.cid) { + case RTW89_ACPI_SAR_CID_HP: + cfg->downgrade_2tx = 3 << TXPWR_FACTOR_OF_RTW89_ACPI_SAR; + ind->fields = RTW89_ACPI_SAR_ANT_NR_STD; + break; + case RTW89_ACPI_SAR_CID_RT: + cfg->downgrade_2tx = 0; + ind->fields = 1; + break; + default: + return -EFAULT; + } + + if (fetch_indicator) { + ind->rfpath_to_antidx = rec->rfpath_to_antidx; + ret = 
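/* Sketch of the defensive index handling in the dynamic-SAR-indicator code
 * above: the ACPI field is 1-based, so it is converted to a 0-based table
 * index, and anything out of range (including a zero field) falls back to
 * table 0. Standalone model of just that conversion. */
#include <stdio.h>

static unsigned int pick_table(unsigned char acpi_field, unsigned int valid_num)
{
	unsigned int sel = acpi_field - 1;	/* ACPI reports a 1-based selection */

	if (sel >= valid_num)			/* unsigned wrap also catches field == 0 */
		return 0;
	return sel;
}

int main(void)
{
	printf("%u %u %u\n", pick_table(2, 3), pick_table(9, 3), pick_table(0, 3));
	return 0;
}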
rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, NULL); + if (ret) + fetch_indicator = false; + } + + if (!fetch_indicator) + memset(ind->tblsel, 0, sizeof(ind->tblsel)); + + ind->enable_sync = fetch_indicator; + return 0; +} diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h index b43ab106e44d..8c918ee02d2e 100644 --- a/drivers/net/wireless/realtek/rtw89/acpi.h +++ b/drivers/net/wireless/realtek/rtw89/acpi.h @@ -7,6 +7,11 @@ #include "core.h" +struct rtw89_acpi_data { + u32 len; + u8 buf[] __counted_by(len); +}; + enum rtw89_acpi_dsm_func { RTW89_ACPI_DSM_FUNC_IDN_BAND_SUP = 2, RTW89_ACPI_DSM_FUNC_6G_DIS = 3, @@ -26,6 +31,13 @@ enum rtw89_acpi_policy_mode { RTW89_ACPI_POLICY_ALLOW = 1, }; +enum rtw89_acpi_conf_tas { + RTW89_ACPI_CONF_TAS_US = BIT(0), + RTW89_ACPI_CONF_TAS_CA = BIT(1), + RTW89_ACPI_CONF_TAS_KR = BIT(2), + RTW89_ACPI_CONF_TAS_OTHERS = BIT(7), +}; + struct rtw89_acpi_country_code { /* below are allowed: * * ISO alpha2 country code @@ -54,12 +66,21 @@ struct rtw89_acpi_policy_6ghz_sp { u8 rsvd; } __packed; +struct rtw89_acpi_policy_tas { + u8 signature[4]; + u8 revision; + u8 enable; + u8 enabled_countries; + u8 rsvd[3]; +} __packed; + struct rtw89_acpi_dsm_result { union { u8 value; /* caller needs to free it after using */ struct rtw89_acpi_policy_6ghz *policy_6ghz; struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp; + struct rtw89_acpi_policy_tas *policy_tas; } u; }; @@ -70,10 +91,179 @@ struct rtw89_acpi_rtag_result { u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR]; } __packed; +enum rtw89_acpi_sar_cid { + RTW89_ACPI_SAR_CID_HP = 0x5048, + RTW89_ACPI_SAR_CID_RT = 0x5452, +}; + +enum rtw89_acpi_sar_rev { + RTW89_ACPI_SAR_REV_LEGACY = 1, + RTW89_ACPI_SAR_REV_HAS_6GHZ = 2, +}; + +#define RTW89_ACPI_SAR_ANT_NR_STD 4 +#define RTW89_ACPI_SAR_ANT_NR_SML 2 + +#define RTW89_ACPI_METHOD_STATIC_SAR "WRDS" +#define RTW89_ACPI_METHOD_DYNAMIC_SAR "RWRD" +#define RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR "RWSI" +#define RTW89_ACPI_METHOD_GEO_SAR "RWGS" + +struct rtw89_acpi_sar_std_legacy { + u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY]; +} __packed; + +struct rtw89_acpi_sar_std_has_6ghz { + u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ]; +} __packed; + +struct rtw89_acpi_sar_sml_legacy { + u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY]; +} __packed; + +struct rtw89_acpi_sar_sml_has_6ghz { + u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ]; +} __packed; + +struct rtw89_acpi_static_sar_hdr { + __le16 cid; + u8 rev; + u8 content[]; +} __packed; + +struct rtw89_acpi_dynamic_sar_hdr { + __le16 cid; + u8 rev; + u8 cnt; + u8 content[]; +} __packed; + +struct rtw89_acpi_sar_identifier { + enum rtw89_acpi_sar_cid cid; + enum rtw89_acpi_sar_rev rev; + u8 size; +}; + +/* for rtw89_acpi_sar_identifier::size */ +#define RTW89_ACPI_SAR_SIZE_MAX U8_MAX +#define RTW89_ACPI_SAR_SIZE_OF(type) \ + (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_sar_ ## type) > \ + RTW89_ACPI_SAR_SIZE_MAX) + \ + sizeof(struct rtw89_acpi_sar_ ## type)) + +struct rtw89_acpi_sar_recognition { + struct rtw89_acpi_sar_identifier id; + const struct rtw89_acpi_geo_sar_handler *geo; + + u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath); + s16 (*normalize)(u8 v); + void (*load)(struct rtw89_dev *rtwdev, + const struct rtw89_acpi_sar_recognition *rec, + const void *content, + struct rtw89_sar_entry_from_acpi *ent); +}; + +struct rtw89_acpi_geo_sar_hp_val { + u8 max; + s8 
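/* Sketch of the pattern behind struct rtw89_acpi_data above: a length field
 * plus a __counted_by() flexible array, allocated in one shot with a
 * struct_size()-style computation (as done in rtw89_acpi_evaluate_method()).
 * The kernel annotation and helper are modeled locally so the example builds
 * on its own; the real struct_size() additionally guards against overflow,
 * and __counted_by() ties the array bound to "len" for compiler and fortify
 * bounds checking. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#define __counted_by(m)		/* kernel bounds annotation; no-op in this sketch */

struct acpi_data {
	unsigned int len;
	unsigned char buf[] __counted_by(len);
};

static struct acpi_data *acpi_data_dup(const void *src, unsigned int len)
{
	struct acpi_data *d = malloc(offsetof(struct acpi_data, buf) + len);

	if (!d)
		return NULL;
	d->len = len;
	memcpy(d->buf, src, len);
	return d;
}

int main(void)
{
	const unsigned char raw[] = { 0xde, 0xad, 0xbe, 0xef };
	struct acpi_data *d = acpi_data_dup(raw, sizeof(raw));

	if (!d)
		return 1;
	printf("copied %u bytes, first 0x%02x\n", d->len, d->buf[0]);
	free(d);
	return 0;
}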
delta[RTW89_ACPI_SAR_ANT_NR_STD]; +} __packed; + +struct rtw89_acpi_geo_sar_hp_legacy_entry { + struct rtw89_acpi_geo_sar_hp_val val_2ghz; + struct rtw89_acpi_geo_sar_hp_val val_5ghz; +} __packed; + +struct rtw89_acpi_geo_sar_hp_has_6ghz_entry { + struct rtw89_acpi_geo_sar_hp_val val_2ghz; + struct rtw89_acpi_geo_sar_hp_val val_5ghz; + struct rtw89_acpi_geo_sar_hp_val val_6ghz; +} __packed; + +enum rtw89_acpi_geo_sar_regd_hp { + RTW89_ACPI_GEO_SAR_REGD_HP_FCC = 0, + RTW89_ACPI_GEO_SAR_REGD_HP_ETSI = 1, + RTW89_ACPI_GEO_SAR_REGD_HP_WW = 2, + + RTW89_ACPI_GEO_SAR_REGD_NR_HP, +}; + +struct rtw89_acpi_geo_sar_hp_legacy { + struct rtw89_acpi_geo_sar_hp_legacy_entry + entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP]; +} __packed; + +struct rtw89_acpi_geo_sar_hp_has_6ghz { + struct rtw89_acpi_geo_sar_hp_has_6ghz_entry + entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP]; +} __packed; + +struct rtw89_acpi_geo_sar_rt_val { + u8 max; + s8 delta; +} __packed; + +struct rtw89_acpi_geo_sar_rt_legacy_entry { + struct rtw89_acpi_geo_sar_rt_val val_2ghz; + struct rtw89_acpi_geo_sar_rt_val val_5ghz; +} __packed; + +struct rtw89_acpi_geo_sar_rt_has_6ghz_entry { + struct rtw89_acpi_geo_sar_rt_val val_2ghz; + struct rtw89_acpi_geo_sar_rt_val val_5ghz; + struct rtw89_acpi_geo_sar_rt_val val_6ghz; +} __packed; + +enum rtw89_acpi_geo_sar_regd_rt { + RTW89_ACPI_GEO_SAR_REGD_RT_FCC = 0, + RTW89_ACPI_GEO_SAR_REGD_RT_ETSI = 1, + RTW89_ACPI_GEO_SAR_REGD_RT_MKK = 2, + RTW89_ACPI_GEO_SAR_REGD_RT_IC = 3, + RTW89_ACPI_GEO_SAR_REGD_RT_KCC = 4, + RTW89_ACPI_GEO_SAR_REGD_RT_WW = 5, + + RTW89_ACPI_GEO_SAR_REGD_NR_RT, +}; + +struct rtw89_acpi_geo_sar_rt_legacy { + struct rtw89_acpi_geo_sar_rt_legacy_entry + entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT]; +} __packed; + +struct rtw89_acpi_geo_sar_rt_has_6ghz { + struct rtw89_acpi_geo_sar_rt_has_6ghz_entry + entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT]; +} __packed; + +struct rtw89_acpi_geo_sar_handler { + u8 data_size; + + void (*load)(struct rtw89_dev *rtwdev, + const void *content, + enum rtw89_regulation_type regd, + struct rtw89_sar_entry_from_acpi *ent); +}; + +/* for rtw89_acpi_geo_sar_handler::data_size */ +#define RTW89_ACPI_GEO_SAR_SIZE_MAX U8_MAX +#define RTW89_ACPI_GEO_SAR_SIZE_OF(type) \ + (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_geo_sar_ ## type) > \ + RTW89_ACPI_GEO_SAR_SIZE_MAX) + \ + sizeof(struct rtw89_acpi_geo_sar_ ## type)) + +enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev, + u32 center_freq); +enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev, + enum rtw89_acpi_sar_subband subband); + int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, enum rtw89_acpi_dsm_func func, struct rtw89_acpi_dsm_result *res); int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev, struct rtw89_acpi_rtag_result *res); +int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg); +int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev, + struct rtw89_sar_cfg_acpi *cfg, + bool *changed); #endif diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index eca3d767ff60..385a238fe5cc 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -6,6 +6,7 @@ #include "debug.h" #include "fw.h" #include "mac.h" +#include "ps.h" static struct sk_buff * rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev, @@ -469,11 +470,17 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, bool ext_key = false; int ret; + if (ieee80211_vif_is_mld(vif) && 
!chip->hw_mlo_bmc_crypto && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: + rtw89_leave_ips_by_hwflags(rtwdev); hw_key_type = RTW89_SEC_KEY_TYPE_WEP40; break; case WLAN_CIPHER_SUITE_WEP104: + rtw89_leave_ips_by_hwflags(rtwdev); hw_key_type = RTW89_SEC_KEY_TYPE_WEP104; break; case WLAN_CIPHER_SUITE_TKIP: diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index f60e93870b09..806f42429a29 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -189,9 +189,10 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, } void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, - enum rtw89_chanctx_idx idx, + struct rtw89_vif_link *rtwvif_link, const struct cfg80211_chan_def *chandef) { + enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx; struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_chanctx_idx cur; @@ -205,6 +206,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, } hal->roc_chandef = *chandef; + hal->roc_link_index = rtw89_vif_link_inst_get_index(rtwvif_link); } else { cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx, RTW89_CHANCTX_IDLE); @@ -339,11 +341,10 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, roc_idx = atomic_read(&hal->roc_chanctx_idx); if (roc_idx != RTW89_CHANCTX_IDLE) { - /* ROC is ongoing (given ROC runs on RTW89_ROC_BY_LINK_INDEX). - * If @link_index is the same as RTW89_ROC_BY_LINK_INDEX, get - * the ongoing ROC chanctx. + /* ROC is ongoing (given ROC runs on @hal->roc_link_index). + * If @link_index is the same, get the ongoing ROC chanctx. */ - if (link_index == RTW89_ROC_BY_LINK_INDEX) + if (link_index == hal->roc_link_index) chanctx_idx = roc_idx; } @@ -358,12 +359,41 @@ dflt: } EXPORT_SYMBOL(__rtw89_mgnt_chan_get); +static enum rtw89_mlo_dbcc_mode +rtw89_entity_sel_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws) +{ + if (rtwdev->chip->chip_gen != RTW89_CHIP_BE) + return MLO_DBCC_NOT_SUPPORT; + + switch (active_hws) { + case BIT(0): + return MLO_2_PLUS_0_1RF; + case BIT(1): + return MLO_0_PLUS_2_1RF; + case BIT(0) | BIT(1): + default: + return MLO_1_PLUS_1_1RF; + } +} + +static +void rtw89_entity_recalc_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws) +{ + enum rtw89_mlo_dbcc_mode mode; + + mode = rtw89_entity_sel_mlo_dbcc_mode(rtwdev, active_hws); + rtwdev->mlo_dbcc_mode = mode; + + rtw89_debug(rtwdev, RTW89_DBG_STATE, "recalc mlo dbcc mode to %d\n", mode); +} + static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; struct rtw89_vif_link *link; struct rtw89_vif *role; + u8 active_hws = 0; u8 pos = 0; int i, j; @@ -412,10 +442,13 @@ fill: continue; mgnt->chanctx_tbl[pos][i] = link->chanctx_idx; + active_hws |= BIT(i); } mgnt->active_roles[pos++] = role; } + + rtw89_entity_recalc_mlo_dbcc_mode(rtwdev, active_hws); } enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) @@ -557,7 +590,9 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev, u64 sync_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf); u32 remainder; - if (tsf < sync_tsf) { + if (role->is_go) { + sync_tsf = 0; + } else if (tsf < sync_tsf) { rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC get tbtt ofst: tsf might not update yet\n"); sync_tsf = 0; @@ -691,19 +726,13 @@ static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta) struct rtw89_vif 
*target = mcc_role->rtwvif_link->rtwvif; struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_vif *rtwvif = rtwsta->rtwvif; - struct rtw89_dev *rtwdev = rtwsta->rtwdev; - struct rtw89_sta_link *rtwsta_link; + u8 macid; if (rtwvif != target) return; - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); - if (unlikely(!rtwsta_link)) { - rtw89_err(rtwdev, "mcc sta macid: find no link on HW-0\n"); - return; - } - - rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta_link->mac_id); + macid = rtw89_sta_get_main_macid(rtwsta); + rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, macid); } static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev, @@ -747,9 +776,11 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev, int ret; int i; - if (!mcc_role->is_go && !mcc_role->is_gc) + if (!mcc_role->is_gc) return; + rtw89_p2p_noa_once_recalc(rtwvif_link); + rcu_read_lock(); bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); @@ -785,6 +816,9 @@ fill: } tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time; + if (tsf_lmt < tsf) + tsf_lmt += roundup_u64(tsf - tsf_lmt, interval); + max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt); max_dur_us = interval - duration; max_tob_us = max_dur_us - max_toa_us; @@ -929,6 +963,15 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) return 0; } +static bool rtw89_mcc_can_courtesy(const struct rtw89_mcc_role *provider, + const struct rtw89_mcc_role *receiver) +{ + if (provider->is_go || receiver->is_gc) + return false; + + return true; +} + static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev, const struct rtw89_mcc_pattern *new) { @@ -937,37 +980,44 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *aux = &mcc->role_aux; struct rtw89_mcc_config *config = &mcc->config; struct rtw89_mcc_pattern *pattern = &config->pattern; + struct rtw89_mcc_courtesy_cfg *crtz; rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n", new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC pattern plan: %d\n", new->plan); + *pattern = *new; memset(&pattern->courtesy, 0, sizeof(pattern->courtesy)); - if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) { - pattern->courtesy.macid_tgt = aux->rtwvif_link->mac_id; - pattern->courtesy.macid_src = ref->rtwvif_link->mac_id; - pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; - pattern->courtesy.enable = true; - } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) { - pattern->courtesy.macid_tgt = ref->rtwvif_link->mac_id; - pattern->courtesy.macid_src = aux->rtwvif_link->mac_id; - pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; - pattern->courtesy.enable = true; + if (RTW89_MCC_REQ_COURTESY(pattern, aux) && rtw89_mcc_can_courtesy(ref, aux)) { + crtz = &pattern->courtesy.ref; + ref->crtz = crtz; + + crtz->macid_tgt = aux->rtwvif_link->mac_id; + crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC courtesy ref: tgt %d, slot %d\n", + crtz->macid_tgt, crtz->slot_num); + } else { + ref->crtz = NULL; } - rtw89_debug(rtwdev, RTW89_DBG_CHAN, - "MCC pattern flags: plan %d, courtesy_en %d\n", - pattern->plan, pattern->courtesy.enable); + if (RTW89_MCC_REQ_COURTESY(pattern, ref) && rtw89_mcc_can_courtesy(aux, ref)) { + crtz = &pattern->courtesy.aux; + aux->crtz = crtz; - if (!pattern->courtesy.enable) - return; + crtz->macid_tgt = ref->rtwvif_link->mac_id; + crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT; 
- rtw89_debug(rtwdev, RTW89_DBG_CHAN, - "MCC pattern courtesy: tgt %d, src %d, slot %d\n", - pattern->courtesy.macid_tgt, pattern->courtesy.macid_src, - pattern->courtesy.slot_num); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC courtesy aux: tgt %d, slot %d\n", + crtz->macid_tgt, crtz->slot_num); + } else { + aux->crtz = NULL; + } } /* The follow-up roughly shows the relationship between the parameters @@ -992,6 +1042,7 @@ static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ref = &mcc->role_ref; struct rtw89_mcc_role *aux = &mcc->role_aux; struct rtw89_mcc_config *config = &mcc->config; + u16 mcc_intvl = config->mcc_interval; u16 bcn_ofst = config->beacon_offset; u16 bt_dur_in_mid = 0; u16 max_bcn_ofst; @@ -1025,7 +1076,7 @@ calc: res = bcn_ofst - bt_dur_in_mid; upper = min_t(s16, ref->duration, res); - lower = 0; + lower = max_t(s16, 0, ref->duration - (mcc_intvl - bcn_ofst)); if (ref->limit.enable) { upper = min_t(s16, upper, ref->limit.max_toa); @@ -1136,6 +1187,107 @@ static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev, return 0; } +static void __rtw89_mcc_fill_ptrn_anchor_ref(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn, + bool small_bcn_ofst) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 bcn_ofst = config->beacon_offset; + u16 ref_tob; + u16 ref_toa; + + if (ref->limit.enable) { + ref_tob = ref->limit.max_tob; + ref_toa = ref->limit.max_toa; + } else { + ref_tob = ref->duration / 2; + ref_toa = ref->duration / 2; + } + + if (small_bcn_ofst) { + ptrn->toa_ref = ref_toa; + ptrn->tob_ref = ref->duration - ptrn->toa_ref; + } else { + ptrn->tob_ref = ref_tob; + ptrn->toa_ref = ref->duration - ptrn->tob_ref; + } + + ptrn->tob_aux = bcn_ofst - ptrn->toa_ref; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; +} + +static void __rtw89_mcc_fill_ptrn_anchor_aux(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn, + bool small_bcn_ofst) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 bcn_ofst = config->beacon_offset; + u16 aux_tob; + u16 aux_toa; + + if (aux->limit.enable) { + aux_tob = aux->limit.max_tob; + aux_toa = aux->limit.max_toa; + } else { + aux_tob = aux->duration / 2; + aux_toa = aux->duration / 2; + } + + if (small_bcn_ofst) { + ptrn->tob_aux = aux_tob; + ptrn->toa_aux = aux->duration - ptrn->tob_aux; + } else { + ptrn->toa_aux = aux_toa; + ptrn->tob_aux = aux->duration - ptrn->toa_aux; + } + + ptrn->toa_ref = bcn_ofst - ptrn->tob_aux; + ptrn->tob_ref = ref->duration - ptrn->toa_ref; +} + +static int __rtw89_mcc_calc_pattern_anchor(struct rtw89_dev *rtwdev, + struct rtw89_mcc_pattern *ptrn, + bool hdl_bt) +{ + struct rtw89_mcc_info *mcc = &rtwdev->mcc; + struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; + struct rtw89_mcc_config *config = &mcc->config; + u16 mcc_intvl = config->mcc_interval; + u16 bcn_ofst = config->beacon_offset; + bool small_bcn_ofst; + + if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME) + small_bcn_ofst = true; + else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME) + small_bcn_ofst = false; + else + return -EPERM; + + *ptrn = (typeof(*ptrn)){ + .plan = hdl_bt ? 
RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT, + }; + + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MCC calc ptrn_ac: plan %d, bcn_ofst %d\n", + ptrn->plan, bcn_ofst); + + if (ref->is_go || ref->is_gc) + __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst); + else if (aux->is_go || aux->is_gc) + __rtw89_mcc_fill_ptrn_anchor_aux(rtwdev, ptrn, small_bcn_ofst); + else + __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst); + + return 0; +} + static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt) { struct rtw89_mcc_info *mcc = &rtwdev->mcc; @@ -1189,6 +1341,10 @@ static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt) goto done; } + ret = __rtw89_mcc_calc_pattern_anchor(rtwdev, &ptrn, hdl_bt); + if (!ret) + goto done; + __rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt); done: @@ -1439,88 +1595,41 @@ static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev) return false; } -static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev, - struct rtw89_mcc_role *tgt, - struct rtw89_mcc_role *src, - bool ref_is_src) -{ - struct rtw89_mcc_info *mcc = &rtwdev->mcc; - struct rtw89_mcc_config *config = &mcc->config; - u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset); - u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval); - u32 cur_tbtt_ofst_src; - u32 tsf_ofst_tgt; - u32 remainder; - u64 tbtt_tgt; - u64 tsf_src; - int ret; - - ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif_link, &tsf_src); - if (ret) { - rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); - return; - } - - cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src); - - if (ref_is_src) - tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us; - else - tbtt_tgt = tsf_src - cur_tbtt_ofst_src + - (bcn_intvl_src_us - beacon_offset_us); - - div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder); - tsf_ofst_tgt = bcn_intvl_src_us - remainder; - - config->sync.macid_tgt = tgt->rtwvif_link->mac_id; - config->sync.band_tgt = tgt->rtwvif_link->mac_idx; - config->sync.port_tgt = tgt->rtwvif_link->port; - config->sync.macid_src = src->rtwvif_link->mac_id; - config->sync.band_src = src->rtwvif_link->mac_idx; - config->sync.port_src = src->rtwvif_link->port; - config->sync.offset = tsf_ofst_tgt / 1024; - config->sync.enable = true; - - rtw89_debug(rtwdev, RTW89_DBG_CHAN, - "MCC sync tbtt: tgt %d, src %d, offset %d\n", - config->sync.macid_tgt, config->sync.macid_src, - config->sync.offset); - - rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif_link, src->rtwvif_link, - config->sync.offset); -} - static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev) { struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_role *ref = &mcc->role_ref; + struct rtw89_mcc_role *aux = &mcc->role_aux; struct rtw89_mcc_config *config = &mcc->config; u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval); - u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref); - struct rtw89_vif_link *rtwvif_link = ref->rtwvif_link; + s32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref); u64 tsf, start_tsf; u32 cur_tbtt_ofst; u64 min_time; + u64 tsf_aux; int ret; - ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); - if (ret) { - rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret); + if (rtw89_concurrent_via_mrc(rtwdev)) + ret = __mrc_fw_req_tsf(rtwdev, &tsf, &tsf_aux); + else + ret = __mcc_fw_req_tsf(rtwdev, &tsf, &tsf_aux); + + if (ret) return ret; - } min_time = tsf; - if (ref->is_go) + if (ref->is_go || aux->is_go) min_time 
+= ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME); else min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME); cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf); start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us; - while (start_tsf < min_time) - start_tsf += bcn_intvl_ref_us; + if (start_tsf < min_time) + start_tsf += roundup_u64(min_time - start_tsf, bcn_intvl_ref_us); config->start_tsf = start_tsf; + config->start_tsf_in_aux_domain = tsf_aux + start_tsf - tsf; return 0; } @@ -1537,13 +1646,11 @@ static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev) switch (mcc->mode) { case RTW89_MCC_MODE_GO_STA: - config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME; + config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev); if (ref->is_go) { - rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false); config->mcc_interval = ref->beacon_interval; rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux); } else { - rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true); config->mcc_interval = aux->beacon_interval; rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref); } @@ -1573,10 +1680,8 @@ bottom: static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role) { + const struct rtw89_mcc_courtesy_cfg *crtz = role->crtz; struct rtw89_mcc_info *mcc = &rtwdev->mcc; - struct rtw89_mcc_config *config = &mcc->config; - struct rtw89_mcc_pattern *pattern = &config->pattern; - struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy; struct rtw89_mcc_policy *policy = &role->policy; struct rtw89_fw_mcc_add_req req = {}; const struct rtw89_chan *chan; @@ -1599,9 +1704,9 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro req.duration = role->duration; req.btc_in_2g = false; - if (courtesy->enable && courtesy->macid_src == req.macid) { - req.courtesy_target = courtesy->macid_tgt; - req.courtesy_num = courtesy->slot_num; + if (crtz) { + req.courtesy_target = crtz->macid_tgt; + req.courtesy_num = crtz->slot_num; req.courtesy_en = true; } @@ -1781,26 +1886,23 @@ static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev, struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_role *ref = &mcc->role_ref; struct rtw89_mcc_role *aux = &mcc->role_aux; - struct rtw89_mcc_config *config = &mcc->config; - struct rtw89_mcc_pattern *pattern = &config->pattern; - struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy; struct rtw89_fw_mrc_add_slot_arg *slot_arg_src; - u8 slot_idx_tgt; - if (!courtesy->enable) - return; - - if (courtesy->macid_src == ref->rtwvif_link->mac_id) { + if (ref->crtz) { slot_arg_src = &arg->slots[ref->slot_idx]; - slot_idx_tgt = aux->slot_idx; - } else { - slot_arg_src = &arg->slots[aux->slot_idx]; - slot_idx_tgt = ref->slot_idx; + + slot_arg_src->courtesy_target = aux->slot_idx; + slot_arg_src->courtesy_period = ref->crtz->slot_num; + slot_arg_src->courtesy_en = true; } - slot_arg_src->courtesy_target = slot_idx_tgt; - slot_arg_src->courtesy_period = courtesy->slot_num; - slot_arg_src->courtesy_en = true; + if (aux->crtz) { + slot_arg_src = &arg->slots[aux->slot_idx]; + + slot_arg_src->courtesy_target = ref->slot_idx; + slot_arg_src->courtesy_period = aux->crtz->slot_num; + slot_arg_src->courtesy_en = true; + } } static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace) @@ -2000,30 +2102,24 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable) struct rtw89_mcc_role *ref = &mcc->role_ref; struct rtw89_mcc_role *aux = &mcc->role_aux; struct rtw89_mcc_config *config = &mcc->config; - struct rtw89_mcc_pattern 
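/* Sketch of the arithmetic used twice in the MCC changes above (for tsf_lmt
 * in rtw89_mcc_fill_role_limit() and for start_tsf in
 * rtw89_mcc_fill_start_tsf()): instead of looping "while (t < bound)
 * t += interval;", the deficit is rounded up to a whole number of intervals
 * and added in one step. roundup_u64() is modeled locally here; the values
 * are arbitrary. */
#include <stdio.h>
#include <stdint.h>

static uint64_t roundup_u64(uint64_t x, uint64_t y)
{
	return (x + y - 1) / y * y;	/* smallest multiple of y that is >= x */
}

int main(void)
{
	uint64_t start = 5000, min_time = 12345, interval = 1024;

	if (start < min_time)
		start += roundup_u64(min_time - start, interval);

	printf("advanced to %llu, the first whole-interval step past %llu\n",
	       (unsigned long long)start, (unsigned long long)min_time);
	return 0;
}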
*pattern = &config->pattern; - struct rtw89_mcc_sync *sync = &config->sync; struct ieee80211_p2p_noa_desc noa_desc = {}; - u64 start_time = config->start_tsf; u32 interval = config->mcc_interval; struct rtw89_vif_link *rtwvif_go; + u64 start_time; u32 duration; if (mcc->mode != RTW89_MCC_MODE_GO_STA) return; if (ref->is_go) { + start_time = config->start_tsf; rtwvif_go = ref->rtwvif_link; start_time += ieee80211_tu_to_usec(ref->duration); duration = config->mcc_interval - ref->duration; } else if (aux->is_go) { + start_time = config->start_tsf_in_aux_domain; rtwvif_go = aux->rtwvif_link; - start_time += ieee80211_tu_to_usec(pattern->tob_ref) + - ieee80211_tu_to_usec(config->beacon_offset) + - ieee80211_tu_to_usec(pattern->toa_aux); duration = config->mcc_interval - aux->duration; - - /* convert time domain from sta(ref) to GO(aux) */ - start_time += ieee80211_tu_to_usec(sync->offset); } else { rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC find no GO: skip updating beacon NoA\n"); @@ -2127,6 +2223,12 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev) } struct rtw89_mcc_stop_sel { + struct { + const struct rtw89_vif_link *target; + } hint; + + /* selection content */ + bool filled; u8 mac_id; u8 slot_idx; }; @@ -2136,6 +2238,7 @@ static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel, { sel->mac_id = mcc_role->rtwvif_link->mac_id; sel->slot_idx = mcc_role->slot_idx; + sel->filled = true; } static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev, @@ -2145,23 +2248,41 @@ static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev, { struct rtw89_mcc_stop_sel *sel = data; + if (mcc_role->rtwvif_link == sel->hint.target) { + rtw89_mcc_stop_sel_fill(sel, mcc_role); + return 1; /* break iteration */ + } + + if (sel->filled) + return 0; + if (!mcc_role->rtwvif_link->chanctx_assigned) return 0; rtw89_mcc_stop_sel_fill(sel, mcc_role); - return 1; /* break iteration */ + return 0; } -static void rtw89_mcc_stop(struct rtw89_dev *rtwdev) +static void rtw89_mcc_stop(struct rtw89_dev *rtwdev, + const struct rtw89_chanctx_pause_parm *pause) { + struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_role *ref = &mcc->role_ref; - struct rtw89_mcc_stop_sel sel; + struct rtw89_mcc_stop_sel sel = { + .hint.target = pause ? 
pause->trigger : NULL, + }; int ret; + if (!pause) { + wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->chanctx_work); + bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES); + } + /* by default, stop at ref */ - rtw89_mcc_stop_sel_fill(&sel, ref); rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel); + if (!sel.filled) + rtw89_mcc_stop_sel_fill(&sel, ref); rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at <macid %d>\n", sel.mac_id); @@ -2193,6 +2314,7 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev) struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_config *config = &mcc->config; struct rtw89_mcc_config old_cfg = *config; + bool courtesy_changed; bool sync_changed; int ret; @@ -2205,8 +2327,15 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev) if (ret) return ret; + if (memcmp(&old_cfg.pattern.courtesy, &config->pattern.courtesy, + sizeof(old_cfg.pattern.courtesy)) == 0) + courtesy_changed = false; + else + courtesy_changed = true; + if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT || - config->pattern.plan != RTW89_MCC_PLAN_NO_BT) { + config->pattern.plan != RTW89_MCC_PLAN_NO_BT || + courtesy_changed) { if (rtw89_concurrent_via_mrc(rtwdev)) ret = __mrc_fw_start(rtwdev, true); else @@ -2238,7 +2367,7 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev) struct rtw89_mcc_info *mcc = &rtwdev->mcc; struct rtw89_mcc_config *config = &mcc->config; struct rtw89_mcc_pattern *pattern = &config->pattern; - s16 tolerance; + u16 tolerance; u16 bcn_ofst; u16 diff; @@ -2246,18 +2375,25 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev) return; bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev); + if (bcn_ofst == config->beacon_offset) + return; + if (bcn_ofst > config->beacon_offset) { diff = bcn_ofst - config->beacon_offset; if (pattern->tob_aux < 0) tolerance = -pattern->tob_aux; - else + else if (pattern->toa_aux > 0) tolerance = pattern->toa_aux; + else + return; /* no chance to improve */ } else { diff = config->beacon_offset - bcn_ofst; if (pattern->toa_aux < 0) tolerance = -pattern->toa_aux; - else + else if (pattern->tob_aux > 0) tolerance = pattern->tob_aux; + else + return; /* no chance to improve */ } if (diff <= tolerance) @@ -2504,7 +2640,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev) } void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, - enum rtw89_chanctx_pause_reasons rsn) + const struct rtw89_chanctx_pause_parm *pause_parm) { struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_entity_mode mode; @@ -2514,12 +2650,12 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, if (hal->entity_pause) return; - rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn); + rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", pause_parm->rsn); mode = rtw89_get_entity_mode(rtwdev); switch (mode) { case RTW89_ENTITY_MODE_MCC: - rtw89_mcc_stop(rtwdev); + rtw89_mcc_stop(rtwdev, pause_parm); break; default: break; @@ -2744,7 +2880,7 @@ out: cur = rtw89_get_entity_mode(rtwdev); switch (cur) { case RTW89_ENTITY_MODE_MCC: - rtw89_mcc_stop(rtwdev); + rtw89_mcc_stop(rtwdev, NULL); break; default: break; diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index e6391f6f2aa7..2a25563593af 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -31,6 +31,14 @@ #define RTW89_MCC_DFLT_TX_NULL_EARLY 3 #define RTW89_MCC_DFLT_COURTESY_SLOT 3 +#define RTW89_MCC_REQ_COURTESY_TIME 5 +#define RTW89_MCC_REQ_COURTESY(pattern, role) \ +({ \ + const struct 
rtw89_mcc_pattern *p = pattern; \ + p->tob_ ## role <= RTW89_MCC_REQ_COURTESY_TIME || \ + p->toa_ ## role <= RTW89_MCC_REQ_COURTESY_TIME; \ +}) + #define NUM_OF_RTW89_MCC_ROLES 2 enum rtw89_chanctx_pause_reasons { @@ -38,6 +46,11 @@ enum rtw89_chanctx_pause_reasons { RTW89_CHANCTX_PAUSE_REASON_ROC, }; +struct rtw89_chanctx_pause_parm { + const struct rtw89_vif_link *trigger; + enum rtw89_chanctx_pause_reasons rsn; +}; + struct rtw89_chanctx_cb_parm { int (*cb)(struct rtw89_dev *rtwdev, void *data); void *data; @@ -95,7 +108,7 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx idx, const struct cfg80211_chan_def *chandef); void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, - enum rtw89_chanctx_idx idx, + struct rtw89_vif_link *rtwvif_link, const struct cfg80211_chan_def *chandef); void rtw89_entity_init(struct rtw89_dev *rtwdev); enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev); @@ -105,7 +118,7 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, enum rtw89_chanctx_changes change); void rtw89_chanctx_track(struct rtw89_dev *rtwdev); void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, - enum rtw89_chanctx_pause_reasons rsn); + const struct rtw89_chanctx_pause_parm *parm); void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev, const struct rtw89_chanctx_cb_parm *cb_parm); diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index cc9b014457ac..49447668cbf3 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -203,6 +203,23 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = { }, }; +static const u8 rtw89_ext_capa_sta[] = { + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, +}; + +static const struct wiphy_iftype_ext_capab rtw89_iftypes_ext_capa[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = rtw89_ext_capa_sta, + .extended_capabilities_mask = rtw89_ext_capa_sta, + .extended_capabilities_len = sizeof(rtw89_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = 0, + .mld_capa_and_ops = 0, + }, +}; + #define RTW89_6GHZ_SPAN_HEAD 6145 #define RTW89_6GHZ_SPAN_IDX(center_freq) \ ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2) @@ -211,6 +228,8 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = { [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \ .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \ .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \ + .acpi_sar_subband_low = RTW89_ACPI_SAR_6GHZ_ ## subband_l, \ + .acpi_sar_subband_high = RTW89_ACPI_SAR_6GHZ_ ## subband_h, \ .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \ .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \ } @@ -639,9 +658,17 @@ out: static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link; + if (desc_info->mlo && !desc_info->sw_mld) { + if (rtwsta_link) + return rtw89_sta_get_main_macid(rtwsta_link->rtwsta); + else + return rtw89_vif_get_main_macid(rtwvif_link->rtwvif); + } + if (!rtwsta_link) return rtwvif_link->mac_id; @@ -671,7 +698,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, struct sk_buff *skb = tx_req->skb; u8 qsel, ch_dma; - qsel = desc_info->hiq ? 
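An illustrative, self-contained rendering of the RTW89_MCC_REQ_COURTESY() check introduced above (the struct and function names here are placeholders of mine): a role asks for courtesy slots whenever either side of its window, tob or toa, is at or below the 5 TU threshold.

        #include <stdbool.h>

        #define REQ_COURTESY_TIME_TU    5       /* mirrors RTW89_MCC_REQ_COURTESY_TIME */

        struct mcc_window {
                int tob;        /* time offset before the anchor, in TU */
                int toa;        /* time offset after the anchor, in TU */
        };

        /* True when the role's window is tight enough to need courtesy slots. */
        static bool mcc_req_courtesy(const struct mcc_window *w)
        {
                return w->tob <= REQ_COURTESY_TIME_TU ||
                       w->toa <= REQ_COURTESY_TIME_TU;
        }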
RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT; + qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req); ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); desc_info->qsel = qsel; @@ -1104,39 +1131,23 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev, return 0; } -int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) +static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link, + struct rtw89_sta_link *rtwsta_link, + struct sk_buff *skb, int *qsel, bool sw_mld) { - struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); - struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); - struct rtw89_core_tx_request tx_req = {0}; - struct rtw89_sta_link *rtwsta_link = NULL; - struct rtw89_vif_link *rtwvif_link; + struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link); + struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; + struct rtw89_core_tx_request tx_req = {}; int ret; - /* By default, driver writes tx via the link on HW-0. And then, - * according to links' status, HW can change tx to another link. - */ - - if (rtwsta) { - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); - if (unlikely(!rtwsta_link)) { - rtw89_err(rtwdev, "tx: find no sta link on HW-0\n"); - return -ENOLINK; - } - } - - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); - if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "tx: find no vif link on HW-0\n"); - return -ENOLINK; - } - tx_req.skb = skb; tx_req.vif = vif; tx_req.sta = sta; tx_req.rtwvif_link = rtwvif_link; tx_req.rtwsta_link = rtwsta_link; + tx_req.desc_info.sw_mld = sw_mld; rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true); rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true); @@ -1155,6 +1166,33 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, return 0; } +int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel) +{ + struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); + struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); + struct rtw89_sta_link *rtwsta_link = NULL; + struct rtw89_vif_link *rtwvif_link; + + if (rtwsta) { + rtwsta_link = rtw89_get_designated_link(rtwsta); + if (unlikely(!rtwsta_link)) { + rtw89_err(rtwdev, "tx: find no sta designated link\n"); + return -ENOLINK; + } + + rtwvif_link = rtwsta_link->rtwvif_link; + } else { + rtwvif_link = rtw89_get_designated_link(rtwvif); + if (unlikely(!rtwvif_link)) { + rtw89_err(rtwdev, "tx: find no vif designated link\n"); + return -ENOLINK; + } + } + + return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false); +} + static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) | @@ -1382,7 +1420,9 @@ static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info) static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info) { - u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq); + u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) | + FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) | + FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld); return cpu_to_le32(dword); } @@ -1635,10 +1675,7 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data, u8 evm_pos = 0; int i; - /* FIXME: For single link, taking link on HW-0 here is okay. 
But, when - * enabling multiple active links, we should determine the right link. - */ - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); + rtwsta_link = rtw89_sta_get_link_inst(rtwsta, phy_ppdu->phy_idx); if (unlikely(!rtwsta_link)) return; @@ -2058,10 +2095,21 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, break; if (aid == vif->cfg.aid) { - enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1); + enum nl80211_he_ru_alloc rua; rtwvif->stats.rx_tf_acc++; rtwdev->stats.rx_tf_acc++; + + /* The following only required for HE trigger frame, but we + * cannot use UL HE-SIG-A2 reserved subfield to identify it + * since some 11ax APs will fill it with all 0s, which will + * be misunderstood as EHT trigger frame. + */ + if (bss_conf->eht_support) + break; + + rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1); + if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ && rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106) rtwvif_link->pwr_diff_en = true; @@ -2152,8 +2200,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; struct rtw89_rx_desc_info *desc_info = iter_data->desc_info; struct sk_buff *skb = iter_data->skb; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu; + bool is_mld = ieee80211_vif_is_mld(vif); struct ieee80211_bss_conf *bss_conf; struct rtw89_vif_link *rtwvif_link; const u8 *bssid = iter_data->bssid; @@ -2165,10 +2215,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, rcu_read_lock(); - /* FIXME: For single link, taking link on HW-0 here is okay. But, when - * enabling multiple active links, we should determine the right link. - */ - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_vif_get_link_inst(rtwvif, desc_info->bb_sel); if (unlikely(!rtwvif_link)) goto out; @@ -2184,6 +2231,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, if (!ether_addr_equal(bss_conf->bssid, bssid)) goto out; + if (is_mld) { + rx_status->link_valid = true; + rx_status->link_id = rtwvif_link->link_id; + } + if (ieee80211_is_beacon(hdr->frame_control)) { if (vif->type == NL80211_IFTYPE_STATION && !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) { @@ -2482,7 +2534,8 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev, .len = skb->len, .to_self = desc_info->addr1_match, .rate = desc_info->data_rate, - .mac_id = desc_info->mac_id}; + .mac_id = desc_info->mac_id, + .phy_idx = desc_info->bb_sel}; int ret; if (desc_info->mac_info_valid) { @@ -2593,6 +2646,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK); desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD); desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK); + desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL); if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT) desc_info->mac_info_valid = true; @@ -2665,10 +2719,7 @@ void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta) struct rtw89_sta_link *rtwsta_link; u8 mac_id = iter_data->mac_id; - /* FIXME: For single link, taking link on HW-0 here is okay. But, when - * enabling multiple active links, we should determine the right link. 
- */ - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); + rtwsta_link = rtw89_sta_get_link_inst(rtwsta, desc_info->bb_sel); if (unlikely(!rtwsta_link)) return; @@ -3117,9 +3168,9 @@ static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev, if (!rtwsta) return false; - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); + rtwsta_link = rtw89_get_designated_link(rtwsta); if (unlikely(!rtwsta_link)) { - rtw89_err(rtwdev, "agg wait: find no link on HW-0\n"); + rtw89_err(rtwdev, "agg wait: find no designated link\n"); return false; } @@ -3284,8 +3335,10 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, { struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1; + struct rtw89_sta_link *rtwsta_link; struct ieee80211_sta *sta; struct ieee80211_hdr *hdr; + struct rtw89_sta *rtwsta; struct sk_buff *skb; int ret, qsel; @@ -3298,6 +3351,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, ret = -EINVAL; goto out; } + rtwsta = sta_to_rtwsta(sta); skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos); if (!skb) { @@ -3309,7 +3363,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, if (ps) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); - ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel); + rtwsta_link = rtwsta->links[rtwvif_link->link_id]; + if (unlikely(!rtwsta_link)) { + ret = -ENOLINK; + goto out; + } + + ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true); if (ret) { rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret); dev_kfree_skb_any(skb); @@ -3329,6 +3389,9 @@ out: void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + struct rtw89_chanctx_pause_parm pause_parm = { + .rsn = RTW89_CHANCTX_PAUSE_REASON_ROC, + }; struct ieee80211_hw *hw = rtwdev->hw; struct rtw89_roc *roc = &rtwvif->roc; struct rtw89_vif_link *rtwvif_link; @@ -3342,14 +3405,16 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "roc start: find no link on HW-%u\n", - RTW89_ROC_BY_LINK_INDEX); + rtw89_err(rtwdev, "roc start: find no designated link\n"); return; } - rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC); + roc->link_id = rtwvif_link->link_id; + + pause_parm.trigger = rtwvif_link; + rtw89_chanctx_pause(rtwdev, &pause_parm); ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true); if (ret) @@ -3369,7 +3434,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) } cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT); - rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, &roc_chan); + rtw89_config_roc_chandef(rtwdev, rtwvif_link, &roc_chan); rtw89_set_channel(rtwdev); reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); @@ -3398,10 +3463,10 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX); + rtwvif_link = rtwvif->links[roc->link_id]; if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "roc end: find no link on HW-%u\n", - RTW89_ROC_BY_LINK_INDEX); + rtw89_err(rtwdev, "roc end: find no 
link (link id %u)\n", + roc->link_id); return; } @@ -3409,7 +3474,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); roc->state = RTW89_ROC_IDLE; - rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL); + rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL); rtw89_chanctx_proceed(rtwdev, NULL); ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false); if (ret) @@ -3577,6 +3642,98 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, ewma_tp_init(&stats->rx_ewma_tp); } +#define RTW89_MLSR_GOTO_2GHZ_THRESHOLD -53 +#define RTW89_MLSR_EXIT_2GHZ_THRESHOLD -38 +static void rtw89_core_mlsr_link_decision(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + unsigned int sel_link_id = IEEE80211_MLD_MAX_NUM_LINKS; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct rtw89_vif_link *rtwvif_link; + const struct rtw89_chan *chan; + unsigned long usable_links; + unsigned int link_id; + u8 decided_bands; + u8 rssi; + + rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); + if (unlikely(!rssi)) + return; + + if (RTW89_RSSI_RAW_TO_DBM(rssi) >= RTW89_MLSR_EXIT_2GHZ_THRESHOLD) + decided_bands = BIT(RTW89_BAND_5G) | BIT(RTW89_BAND_6G); + else if (RTW89_RSSI_RAW_TO_DBM(rssi) <= RTW89_MLSR_GOTO_2GHZ_THRESHOLD) + decided_bands = BIT(RTW89_BAND_2G); + else + return; + + usable_links = ieee80211_vif_usable_links(vif); + + rtwvif_link = rtw89_get_designated_link(rtwvif); + if (unlikely(!rtwvif_link)) + goto select; + + chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); + if (decided_bands & BIT(chan->band_type)) + return; + + usable_links &= ~BIT(rtwvif_link->link_id); + +select: + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf; + struct ieee80211_channel *channel; + enum rtw89_band band; + + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (unlikely(!link_conf)) + continue; + + channel = link_conf->chanreq.oper.chan; + if (unlikely(!channel)) + continue; + + band = rtw89_nl80211_to_hw_band(channel->band); + if (decided_bands & BIT(band)) { + sel_link_id = link_id; + break; + } + } + + rcu_read_unlock(); + + if (sel_link_id == IEEE80211_MLD_MAX_NUM_LINKS) + return; + + rtw89_core_mlsr_switch(rtwdev, rtwvif, sel_link_id); +} + +static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct ieee80211_vif *vif; + struct rtw89_vif *rtwvif; + + if (hal->disabled_dm_bitmap & BIT(RTW89_DM_MLO)) + return; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + vif = rtwvif_to_vif(rtwvif); + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + continue; + + switch (rtwvif->mlo_mode) { + case RTW89_MLO_MODE_MLSR: + rtw89_core_mlsr_link_decision(rtwdev, rtwvif); + break; + default: + break; + } + } +} + static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, @@ -3615,9 +3772,10 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work) rtw89_phy_antdiv_track(rtwdev); rtw89_phy_ul_tb_ctrl_track(rtwdev); rtw89_phy_edcca_track(rtwdev); - rtw89_tas_track(rtwdev); + rtw89_sar_track(rtwdev); rtw89_chanctx_track(rtwdev); rtw89_core_rfkill_poll(rtwdev, false); + rtw89_core_mlo_track(rtwdev); if (rtwdev->lps_enabled && !rtwdev->btc.lps) rtw89_enter_lps_track(rtwdev); @@ -3846,6 +4004,9 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev, if (vif->type == 
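The MLSR link decision above hinges on two beacon-RSSI thresholds. A minimal sketch of that decision, taking the RSSI already converted to dBm so it does not assume the exact RTW89_RSSI_RAW_TO_DBM() formula; the enum and helper names are placeholders of mine.

        enum mlsr_band_pref {
                MLSR_KEEP_CURRENT,      /* RSSI sits in the hysteresis window: no switch */
                MLSR_PREFER_2GHZ,
                MLSR_PREFER_5_6GHZ,
        };

        #define MLSR_GOTO_2GHZ_DBM      (-53)   /* mirrors RTW89_MLSR_GOTO_2GHZ_THRESHOLD */
        #define MLSR_EXIT_2GHZ_DBM      (-38)   /* mirrors RTW89_MLSR_EXIT_2GHZ_THRESHOLD */

        /* Map the averaged beacon RSSI (dBm) to a band preference. */
        static enum mlsr_band_pref mlsr_band_decision(int bcn_rssi_dbm)
        {
                if (bcn_rssi_dbm >= MLSR_EXIT_2GHZ_DBM)
                        return MLSR_PREFER_5_6GHZ;      /* strong signal: leave 2 GHz */
                if (bcn_rssi_dbm <= MLSR_GOTO_2GHZ_DBM)
                        return MLSR_PREFER_2GHZ;        /* weak signal: fall back to 2 GHz */
                return MLSR_KEEP_CURRENT;               /* -53 .. -38 dBm: hysteresis */
        }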
NL80211_IFTYPE_STATION) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false); + if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) + rtw89_p2p_noa_once_deinit(rtwvif_link); + return 0; } @@ -4361,17 +4522,18 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev, #define RTW89_SBAND_IFTYPES_NR 2 -static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev, - enum nl80211_band band, - struct ieee80211_supported_band *sband) +static int rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev, + enum nl80211_band band, + struct ieee80211_supported_band *sband) { struct ieee80211_sband_iftype_data *iftype_data; enum nl80211_iftype iftype; int idx = 0; - iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL); + iftype_data = devm_kcalloc(rtwdev->dev, RTW89_SBAND_IFTYPES_NR, + sizeof(*iftype_data), GFP_KERNEL); if (!iftype_data) - return; + return -ENOMEM; for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) { switch (iftype) { @@ -4396,77 +4558,75 @@ static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev, } _ieee80211_set_sband_iftype_data(sband, iftype_data, idx); + return 0; +} + +static struct ieee80211_supported_band * +rtw89_core_sband_dup(struct rtw89_dev *rtwdev, + const struct ieee80211_supported_band *sband) +{ + struct ieee80211_supported_band *dup; + + dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL); + if (!dup) + return NULL; + + dup->channels = devm_kmemdup(rtwdev->dev, sband->channels, + sizeof(*sband->channels) * sband->n_channels, + GFP_KERNEL); + if (!dup->channels) + return NULL; + + dup->bitrates = devm_kmemdup(rtwdev->dev, sband->bitrates, + sizeof(*sband->bitrates) * sband->n_bitrates, + GFP_KERNEL); + if (!dup->bitrates) + return NULL; + + return dup; } static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev) { struct ieee80211_hw *hw = rtwdev->hw; - struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL; - struct ieee80211_supported_band *sband_6ghz = NULL; - u32 size = sizeof(struct ieee80211_supported_band); + struct ieee80211_supported_band *sband; u8 support_bands = rtwdev->chip->support_bands; + int ret; if (support_bands & BIT(NL80211_BAND_2GHZ)) { - sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL); - if (!sband_2ghz) - goto err; - rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap); - rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz); - hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz; + sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_2ghz); + if (!sband) + return -ENOMEM; + rtw89_init_ht_cap(rtwdev, &sband->ht_cap); + ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband); + if (ret) + return ret; + hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } if (support_bands & BIT(NL80211_BAND_5GHZ)) { - sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL); - if (!sband_5ghz) - goto err; - rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap); - rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap); - rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz); - hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz; + sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_5ghz); + if (!sband) + return -ENOMEM; + rtw89_init_ht_cap(rtwdev, &sband->ht_cap); + rtw89_init_vht_cap(rtwdev, &sband->vht_cap); + ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband); + if (ret) + return ret; + hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } if (support_bands & BIT(NL80211_BAND_6GHZ)) { - sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL); - if (!sband_6ghz) - goto err; - 
rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz); - hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz; + sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_6ghz); + if (!sband) + return -ENOMEM; + ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband); + if (ret) + return ret; + hw->wiphy->bands[NL80211_BAND_6GHZ] = sband; } return 0; - -err: - hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; - hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; - hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL; - if (sband_2ghz) - kfree((__force void *)sband_2ghz->iftype_data); - if (sband_5ghz) - kfree((__force void *)sband_5ghz->iftype_data); - if (sband_6ghz) - kfree((__force void *)sband_6ghz->iftype_data); - kfree(sband_2ghz); - kfree(sband_5ghz); - kfree(sband_6ghz); - return -ENOMEM; -} - -static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev) -{ - struct ieee80211_hw *hw = rtwdev->hw; - - if (hw->wiphy->bands[NL80211_BAND_2GHZ]) - kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data); - if (hw->wiphy->bands[NL80211_BAND_5GHZ]) - kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data); - if (hw->wiphy->bands[NL80211_BAND_6GHZ]) - kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data); - kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); - kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); - kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]); - hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; - hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; - hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL; } static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev) @@ -4774,6 +4934,7 @@ struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif, set_bit(index, rtwvif->links_inst_map); rtwvif->links[link_id] = rtwvif_link; + list_add_tail(&rtwvif_link->dlink_schd, &rtwvif->dlink_pool); return rtwvif_link; err: @@ -4794,6 +4955,7 @@ void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id) index = rtw89_vif_link_inst_get_index(link); clear_bit(index, rtwvif->links_inst_map); *container = NULL; + list_del(&link->dlink_schd); } struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta, @@ -4824,6 +4986,7 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta, set_bit(index, rtwsta->links_inst_map); rtwsta->links[link_id] = rtwsta_link; + list_add_tail(&rtwsta_link->dlink_schd, &rtwsta->dlink_pool); return rtwsta_link; err: @@ -4844,6 +5007,7 @@ void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id) index = rtw89_sta_link_inst_get_index(link); clear_bit(index, rtwsta->links_inst_map); *container = NULL; + list_del(&link->dlink_schd); } int rtw89_core_init(struct rtw89_dev *rtwdev) @@ -4860,6 +5024,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) continue; INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]); } + INIT_LIST_HEAD(&rtwdev->scan_info.chan_list); INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work); INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); @@ -4880,6 +5045,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtwdev->total_sta_assoc = 0; rtw89_init_wait(&rtwdev->mcc.wait); + rtw89_init_wait(&rtwdev->mlo.wait); rtw89_init_wait(&rtwdev->mac.fw_ofld_wait); rtw89_init_wait(&rtwdev->wow.wait); rtw89_init_wait(&rtwdev->mac.ps_wait); @@ -4901,7 +5067,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { rtwdev->dbcc_en = true; rtwdev->mac.qta_mode = RTW89_QTA_DBCC; - rtwdev->mlo_dbcc_mode = 
MLO_2_PLUS_0_1RF; + rtwdev->mlo_dbcc_mode = MLO_1_PLUS_1_1RF; } rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0; @@ -4919,7 +5085,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtw89_ser_init(rtwdev); rtw89_entity_init(rtwdev); - rtw89_tas_init(rtwdev); + rtw89_sar_init(rtwdev); rtw89_phy_ant_gain_init(rtwdev); return 0; @@ -4945,9 +5111,6 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx); rtwdev->scanning = true; - rtw89_leave_lps(rtwdev); - if (hw_scan) - rtw89_leave_ips_by_hwflags(rtwdev); ether_addr_copy(rtwvif_link->mac_addr, mac_addr); rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type); @@ -5062,6 +5225,76 @@ out: rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl); } +int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + unsigned int link_id) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + u16 usable_links = ieee80211_vif_usable_links(vif); + u16 active_links = vif->active_links; + struct rtw89_vif_link *target, *cur; + int ret; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + if (unlikely(!ieee80211_vif_is_mld(vif))) + return -EOPNOTSUPP; + + if (unlikely(!(usable_links & BIT(link_id)))) { + rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__, + link_id); + return -ENOLINK; + } + + if (active_links == BIT(link_id)) + return 0; + + rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: switch to link id %u MLSR\n", + __func__, link_id); + + rtw89_leave_lps(rtwdev); + + ieee80211_stop_queues(rtwdev->hw); + flush_work(&rtwdev->txq_work); + + cur = rtw89_get_designated_link(rtwvif); + + ret = ieee80211_set_active_links(vif, active_links | BIT(link_id)); + if (ret) { + rtw89_err(rtwdev, "%s: failed to activate link id %u\n", + __func__, link_id); + goto wake_queue; + } + + target = rtwvif->links[link_id]; + if (unlikely(!target)) { + rtw89_err(rtwdev, "%s: failed to confirm link id %u\n", + __func__, link_id); + + ieee80211_set_active_links(vif, active_links); + ret = -EFAULT; + goto wake_queue; + } + + if (likely(cur)) + rtw89_fw_h2c_mlo_link_cfg(rtwdev, cur, false); + + rtw89_fw_h2c_mlo_link_cfg(rtwdev, target, true); + + ret = ieee80211_set_active_links(vif, BIT(link_id)); + if (ret) + rtw89_err(rtwdev, "%s: failed to inactivate links 0x%x\n", + __func__, active_links); + + rtw89_chip_rfk_channel(rtwdev, target); + + rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR; + +wake_queue: + ieee80211_wake_queues(rtwdev->hw); + + return ret; +} + static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; @@ -5305,8 +5538,11 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) if (chip->chip_gen == RTW89_CHIP_BE) hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; - if (rtwdev->support_mlo) + if (rtwdev->support_mlo) { hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + hw->wiphy->iftype_ext_capab = rtw89_iftypes_ext_capa; + hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(rtw89_iftypes_ext_capa); + } hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; @@ -5337,7 +5573,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) ret = rtw89_regd_setup(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to set up regd\n"); - goto err_free_supported_band; + return ret; } hw->wiphy->sar_capa = &rtw89_sar_capa; @@ -5345,7 +5581,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) ret = ieee80211_register_hw(hw); if (ret) { rtw89_err(rtwdev, "failed to 
register hw\n"); - goto err_free_supported_band; + return ret; } ret = rtw89_regd_init_hint(rtwdev); @@ -5360,8 +5596,6 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) err_unregister_hw: ieee80211_unregister_hw(hw); -err_free_supported_band: - rtw89_core_clr_supported_band(rtwdev); return ret; } @@ -5372,7 +5606,6 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev) rtw89_rfkill_polling_deinit(rtwdev); ieee80211_unregister_hw(hw); - rtw89_core_clr_supported_band(rtwdev); } int rtw89_core_register(struct rtw89_dev *rtwdev) @@ -5440,13 +5673,13 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, if (!hw) goto err; - /* TODO: When driver MLO arch. is done, determine whether to support MLO - * according to the following conditions. - * 1. run with chanctx_ops - * 2. chip->support_link_num != 0 - * 3. FW feature supports AP_LINK_PS + /* Currently, our AP_LINK_PS handling only works for non-MLD softap + * or MLD-single-link softap. If RTW89_MLD_NON_STA_LINK_NUM enlarges, + * please tweak entire AP_LINKS_PS handling before supporting MLO. */ - support_mlo = false; + support_mlo = !no_chanctx && chip->support_link_num && + RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &early_fw) && + RTW89_MLD_NON_STA_LINK_NUM == 1; hw->wiphy->iface_combinations = rtw89_iface_combs; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 4be05d6cad18..1c8f3b9b7c4c 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -798,6 +798,7 @@ struct rtw89_rx_phy_ppdu { u8 rssi[RF_PATH_MAX]; u8 mac_id; u8 chan_idx; + u8 phy_idx; u8 ie; u16 rate; u8 rpl_avg; @@ -1174,6 +1175,7 @@ struct rtw89_tx_desc_info { bool ldpc; bool upd_wlan_hdr; bool mlo; + bool sw_mld; }; struct rtw89_core_tx_request { @@ -3379,6 +3381,7 @@ struct rtw89_sec_cam_entry { struct rtw89_sta_link { struct rtw89_sta *rtwsta; + struct list_head dlink_schd; unsigned int link_id; u8 mac_id; @@ -3445,14 +3448,13 @@ enum rtw89_roc_state { RTW89_ROC_MGMT, }; -#define RTW89_ROC_BY_LINK_INDEX 0 - struct rtw89_roc { struct ieee80211_channel chan; struct wiphy_delayed_work roc_work; enum ieee80211_roc_type type; enum rtw89_roc_state state; int duration; + unsigned int link_id; }; #define RTW89_P2P_MAX_NOA_NUM 2 @@ -3483,8 +3485,17 @@ struct rtw89_p2p_noa_setter { u8 noa_index; }; +struct rtw89_ps_noa_once_handler { + bool in_duration; + u64 tsf_begin; + u64 tsf_end; + struct wiphy_delayed_work set_work; + struct wiphy_delayed_work clr_work; +}; + struct rtw89_vif_link { struct rtw89_vif *rtwvif; + struct list_head dlink_schd; unsigned int link_id; bool chanctx_assigned; /* only valid when running with chanctx_ops */ @@ -3507,6 +3518,7 @@ struct rtw89_vif_link { u8 hit_rule; u8 last_noa_nr; u64 sync_bcn_tsf; + bool rand_tsf_done; bool trigger; bool lsig_txop; u8 tgt_ind; @@ -3527,6 +3539,7 @@ struct rtw89_vif_link { struct rtw89_phy_rate_pattern rate_pattern; struct list_head general_pkt_list; struct rtw89_p2p_noa_setter p2p_noa; + struct rtw89_ps_noa_once_handler noa_once; }; enum rtw89_lv1_rcvy_step { @@ -3986,7 +3999,11 @@ struct rtw89_rfe_parms { struct rtw89_txpwr_rule_2ghz rule_2ghz; struct rtw89_txpwr_rule_5ghz rule_5ghz; struct rtw89_txpwr_rule_6ghz rule_6ghz; + struct rtw89_txpwr_rule_2ghz rule_da_2ghz; + struct rtw89_txpwr_rule_5ghz rule_da_5ghz; + struct rtw89_txpwr_rule_6ghz rule_da_6ghz; struct rtw89_tx_shape tx_shape; + bool has_da; }; struct rtw89_rfe_parms_conf { @@ -4081,9 +4098,15 @@ struct rtw89_rfe_data { struct 
rtw89_txpwr_lmt_2ghz_data lmt_2ghz; struct rtw89_txpwr_lmt_5ghz_data lmt_5ghz; struct rtw89_txpwr_lmt_6ghz_data lmt_6ghz; + struct rtw89_txpwr_lmt_2ghz_data da_lmt_2ghz; + struct rtw89_txpwr_lmt_5ghz_data da_lmt_5ghz; + struct rtw89_txpwr_lmt_6ghz_data da_lmt_6ghz; struct rtw89_txpwr_lmt_ru_2ghz_data lmt_ru_2ghz; struct rtw89_txpwr_lmt_ru_5ghz_data lmt_ru_5ghz; struct rtw89_txpwr_lmt_ru_6ghz_data lmt_ru_6ghz; + struct rtw89_txpwr_lmt_ru_2ghz_data da_lmt_ru_2ghz; + struct rtw89_txpwr_lmt_ru_5ghz_data da_lmt_ru_5ghz; + struct rtw89_txpwr_lmt_ru_6ghz_data da_lmt_ru_6ghz; struct rtw89_tx_shape_lmt_data tx_shape_lmt; struct rtw89_tx_shape_lmt_ru_data tx_shape_lmt_ru; struct rtw89_rfe_parms rfe_parms; @@ -4284,12 +4307,14 @@ struct rtw89_chip_info { bool support_rnr; bool support_ant_gain; bool support_tas; + bool support_sar_by_ant; bool ul_tb_waveform_ctrl; bool ul_tb_pwr_diff; bool rx_freq_frome_ie; bool hw_sec_hdr; bool hw_mgmt_tx_encrypt; bool hw_tkip_crypto; + bool hw_mlo_bmc_crypto; u8 rf_path_num; u8 tx_nss; u8 rx_nss; @@ -4494,6 +4519,7 @@ enum rtw89_fw_feature { RTW89_FW_FEATURE_LPS_CH_INFO, RTW89_FW_FEATURE_NO_PHYCAP_P1, RTW89_FW_FEATURE_NO_POWER_DIFFERENCE, + RTW89_FW_FEATURE_BEACON_LOSS_COUNT_V1, }; struct rtw89_fw_suit { @@ -4606,6 +4632,7 @@ struct rtw89_cam_info { enum rtw89_sar_sources { RTW89_SAR_SOURCE_NONE, RTW89_SAR_SOURCE_COMMON, + RTW89_SAR_SOURCE_ACPI, RTW89_SAR_SOURCE_NR, }; @@ -4630,8 +4657,62 @@ struct rtw89_sar_cfg_common { s32 cfg[RTW89_SAR_SUBBAND_NR]; }; +enum rtw89_acpi_sar_subband { + RTW89_ACPI_SAR_2GHZ_SUBBAND, + RTW89_ACPI_SAR_5GHZ_SUBBAND_1, /* U-NII-1 */ + RTW89_ACPI_SAR_5GHZ_SUBBAND_2, /* U-NII-2 */ + RTW89_ACPI_SAR_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */ + RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */ + RTW89_ACPI_SAR_6GHZ_SUBBAND_8, /* U-NII-8 */ + + NUM_OF_RTW89_ACPI_SAR_SUBBAND, + RTW89_ACPI_SAR_SUBBAND_NR_LEGACY = RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4 + 1, + RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ = RTW89_ACPI_SAR_6GHZ_SUBBAND_8 + 1, +}; + +#define TXPWR_FACTOR_OF_RTW89_ACPI_SAR 3 /* unit: 0.125 dBm */ +#define MAX_VAL_OF_RTW89_ACPI_SAR S16_MAX +#define MIN_VAL_OF_RTW89_ACPI_SAR S16_MIN +#define MAX_NUM_OF_RTW89_ACPI_SAR_TBL 6 +#define NUM_OF_RTW89_ACPI_SAR_RF_PATH (RF_PATH_B + 1) + +struct rtw89_sar_entry_from_acpi { + s16 v[NUM_OF_RTW89_ACPI_SAR_SUBBAND][NUM_OF_RTW89_ACPI_SAR_RF_PATH]; +}; + +struct rtw89_sar_table_from_acpi { + /* If this table is active, must fill all fields according to either + * configuration in BIOS or some default values for SAR to work well. + */ + struct rtw89_sar_entry_from_acpi entries[RTW89_REGD_NUM]; +}; + +struct rtw89_sar_indicator_from_acpi { + bool enable_sync; + unsigned int fields; + u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath); + + /* Select among @tables of container, rtw89_sar_cfg_acpi, by path. + * Not design with pointers since addresses will be invalid after + * sync content with local container instance. 
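The ACPI SAR tables above hold s16 values whose unit is 0.125 dBm (TXPWR_FACTOR_OF_RTW89_ACPI_SAR = 3). A hypothetical pair of conversion helpers, not taken from the driver, showing how such values map to and from milli-dBm with clamping to the s16 range the tables use.

        #include <stdint.h>

        #define ACPI_SAR_FACTOR 3       /* stored unit: 2^-3 dBm = 0.125 dBm */

        /* Stored ACPI SAR value (s16, 0.125 dBm steps) -> milli-dBm. */
        static int32_t acpi_sar_to_mdbm(int16_t v)
        {
                return (int32_t)v * 1000 / (1 << ACPI_SAR_FACTOR);
        }

        /* milli-dBm -> stored unit, clamped to the s16 range of the tables. */
        static int16_t mdbm_to_acpi_sar(int32_t mdbm)
        {
                int32_t v = mdbm * (1 << ACPI_SAR_FACTOR) / 1000;

                if (v > INT16_MAX)
                        return INT16_MAX;
                if (v < INT16_MIN)
                        return INT16_MIN;
                return (int16_t)v;
        }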
+ */ + u8 tblsel[NUM_OF_RTW89_ACPI_SAR_RF_PATH]; +}; + +struct rtw89_sar_cfg_acpi { + u8 downgrade_2tx; + unsigned int valid_num; + struct rtw89_sar_table_from_acpi tables[MAX_NUM_OF_RTW89_ACPI_SAR_TBL]; + struct rtw89_sar_indicator_from_acpi indicator; +}; + struct rtw89_sar_info { - /* used to decide how to acces SAR cfg union */ + /* used to decide how to access SAR cfg union */ enum rtw89_sar_sources src; /* reserved for different knids of SAR cfg struct. @@ -4639,6 +4720,7 @@ struct rtw89_sar_info { */ union { struct rtw89_sar_cfg_common cfg_common; + struct rtw89_sar_cfg_acpi cfg_acpi; }; }; @@ -4674,11 +4756,14 @@ struct rtw89_ant_gain_info { struct rtw89_6ghz_span { enum rtw89_sar_subband sar_subband_low; enum rtw89_sar_subband sar_subband_high; + enum rtw89_acpi_sar_subband acpi_sar_subband_low; + enum rtw89_acpi_sar_subband acpi_sar_subband_high; enum rtw89_ant_gain_subband ant_gain_subband_low; enum rtw89_ant_gain_subband ant_gain_subband_high; }; #define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high) +#define RTW89_ACPI_SAR_SPAN_VALID(span) ((span)->acpi_sar_subband_high) #define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high) enum rtw89_tas_state { @@ -4692,6 +4777,7 @@ enum rtw89_tas_state { struct rtw89_tas_info { u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW]; u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW]; + u8 enabled_countries; u8 txpwr_head_idx; u8 txpwr_tail_idx; u8 tx_ratio_idx; @@ -4765,6 +4851,7 @@ enum rtw89_dm_type { RTW89_DM_DYNAMIC_EDCCA, RTW89_DM_THERMAL_PROTECT, RTW89_DM_TAS, + RTW89_DM_MLO, }; #define RTW89_THERMAL_PROT_LV_MAX 5 @@ -4786,6 +4873,7 @@ struct rtw89_hal { bool no_mcs_12_13; atomic_t roc_chanctx_idx; + u8 roc_link_index; DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES); DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX); @@ -5207,6 +5295,8 @@ struct rtw89_regulatory_info { const struct rtw89_regd *regd; enum rtw89_reg_6ghz_power reg_6ghz_power; struct rtw89_reg_6ghz_tpe reg_6ghz_tpe; + bool txpwr_uk_follow_etsi; + DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM); DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM); DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM); @@ -5360,9 +5450,10 @@ struct rtw89_early_h2c { struct rtw89_hw_scan_info { struct rtw89_vif_link *scanning_vif; struct list_head pkt_list[NUM_NL80211_BANDS]; + struct list_head chan_list; struct rtw89_chan op_chan; + bool connected; bool abort; - u32 last_chan_idx; }; enum rtw89_phy_bb_gain_band { @@ -5574,6 +5665,8 @@ struct rtw89_mcc_role { struct rtw89_mcc_policy policy; struct rtw89_mcc_limit limit; + const struct rtw89_mcc_courtesy_cfg *crtz; + /* only valid when running with FW MRC mechanism */ u8 slot_idx; @@ -5591,13 +5684,16 @@ struct rtw89_mcc_bt_role { u16 duration; /* TU */ }; -struct rtw89_mcc_courtesy { - bool enable; +struct rtw89_mcc_courtesy_cfg { u8 slot_num; - u8 macid_src; u8 macid_tgt; }; +struct rtw89_mcc_courtesy { + struct rtw89_mcc_courtesy_cfg ref; + struct rtw89_mcc_courtesy_cfg aux; +}; + enum rtw89_mcc_plan { RTW89_MCC_PLAN_TAIL_BT, RTW89_MCC_PLAN_MID_BT, @@ -5631,6 +5727,7 @@ struct rtw89_mcc_config { struct rtw89_mcc_pattern pattern; struct rtw89_mcc_sync sync; u64 start_tsf; + u64 start_tsf_in_aux_domain; u16 mcc_interval; /* TU */ u16 beacon_offset; /* TU */ }; @@ -5651,6 +5748,16 @@ struct rtw89_mcc_info { struct rtw89_mcc_config config; }; +enum rtw89_mlo_mode { + RTW89_MLO_MODE_MLSR = 0, + + NUM_OF_RTW89_MLO_MODE, +}; + +struct rtw89_mlo_info { + struct rtw89_wait_info wait; +}; + struct rtw89_dev { struct 
ieee80211_hw *hw; struct device *dev; @@ -5666,6 +5773,7 @@ struct rtw89_dev { const struct rtw89_rfe_parms *rfe_parms; struct rtw89_hal hal; struct rtw89_mcc_info mcc; + struct rtw89_mlo_info mlo; struct rtw89_mac_info mac; struct rtw89_fw_info fw; struct rtw89_hci_info hci; @@ -5802,6 +5910,9 @@ struct rtw89_vif { struct rtw89_roc roc; bool offchan; + enum rtw89_mlo_mode mlo_mode; + + struct list_head dlink_pool; u8 links_inst_valid_num; DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM); struct rtw89_vif_link *links[IEEE80211_MLD_MAX_NUM_LINKS]; @@ -5841,6 +5952,7 @@ struct rtw89_sta { DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM); + struct list_head dlink_pool; u8 links_inst_valid_num; DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM); struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS]; @@ -5936,6 +6048,12 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid) return rcu_dereference(rtwdev->assoc_link_on_macid[macid]); } +#define rtw89_get_designated_link(links_holder) \ +({ \ + typeof(links_holder) p = links_holder; \ + list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \ +}) + static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { @@ -6825,9 +6943,14 @@ static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev, static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band) { - const struct rtw89_regd *regd = rtwdev->regulatory.regd; + const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd *regd = regulatory->regd; + u8 txpwr_regd = regd->txpwr_regd[band]; + + if (regulatory->txpwr_uk_follow_etsi && txpwr_regd == RTW89_UK) + return RTW89_ETSI; - return regd->txpwr_regd[band]; + return txpwr_regd; } static inline void rtw89_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, @@ -7185,6 +7308,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate); int rtw89_regd_setup(struct rtw89_dev *rtwdev); int rtw89_regd_init_hint(struct rtw89_dev *rtwdev); +const char *rtw89_regd_get_string(enum rtw89_regulation_type regd); void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, struct rtw89_traffic_stats *stats); int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond); @@ -7206,5 +7330,7 @@ void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct ieee80211_bss_conf *bss_conf); void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event); +int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + unsigned int link_id); #endif diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index f2c5753fd386..d6016fa107fb 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -85,6 +85,7 @@ struct rtw89_debugfs { struct rtw89_debugfs_priv phy_info; struct rtw89_debugfs_priv stations; struct rtw89_debugfs_priv disable_dm; + struct rtw89_debugfs_priv mlo_mode; }; struct rtw89_debugfs_iter_data { @@ -854,45 +855,21 @@ static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t buf return p - buf; } -#define case_REGD(_regd) \ - case RTW89_ ## _regd: \ - p += scnprintf(p, end - p, #_regd "\n"); \ - break - static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, const struct rtw89_chan *chan) { + const 
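rtw89_get_designated_link() above simply returns the first entry of the dlink_pool list, or NULL when the pool is empty, with links appended to the tail as they are set up. A minimal stand-alone model of that scheme; the types here are simplified stand-ins of mine, not the driver's structures.

        #include <stddef.h>

        struct link_model {
                unsigned int link_id;
                struct link_model *next;
        };

        struct dlink_pool_model {
                struct link_model *head;        /* first entry == designated link */
                struct link_model *tail;
        };

        /* Models list_add_tail() on dlink_schd: newest link goes to the back. */
        static void dlink_pool_add_tail(struct dlink_pool_model *pool,
                                        struct link_model *link)
        {
                link->next = NULL;
                if (pool->tail)
                        pool->tail->next = link;
                else
                        pool->head = link;
                pool->tail = link;
        }

        /* Models rtw89_get_designated_link(): first entry, or NULL when empty. */
        static struct link_model *get_designated_link(struct dlink_pool_model *pool)
        {
                return pool->head;
        }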
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; char *p = buf, *end = buf + bufsz; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); - switch (regd) { - default: - p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd); - break; - case_REGD(WW); - case_REGD(ETSI); - case_REGD(FCC); - case_REGD(MKK); - case_REGD(NA); - case_REGD(IC); - case_REGD(KCC); - case_REGD(NCC); - case_REGD(CHILE); - case_REGD(ACMA); - case_REGD(MEXICO); - case_REGD(UKRAINE); - case_REGD(CN); - case_REGD(QATAR); - case_REGD(UK); - case_REGD(THAILAND); - } + p += scnprintf(p, end - p, "%s\n", rtw89_regd_get_string(regd)); + p += scnprintf(p, end - p, "\t(txpwr UK follow ETSI: %s)\n", + str_yes_no(regulatory->txpwr_uk_follow_etsi)); return p - buf; } -#undef case_REGD - struct dbgfs_txpwr_table { const struct txpwr_map *byr; const struct txpwr_map *lmt; @@ -949,6 +926,7 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; + struct rtw89_sar_parm sar_parm = {}; const struct dbgfs_txpwr_table *tbl; const struct rtw89_chan *chan; char *p = buf, *end = buf + bufsz; @@ -958,11 +936,12 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev, rtw89_leave_ps_mode(rtwdev); chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + sar_parm.center_freq = chan->freq; p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan); p += scnprintf(p, end - p, "[SAR]\n"); - p += rtw89_print_sar(rtwdev, p, end - p, chan->freq); + p += rtw89_print_sar(rtwdev, p, end - p, &sar_parm); p += scnprintf(p, end - p, "[TAS]\n"); p += rtw89_print_tas(rtwdev, p, end - p); @@ -3993,14 +3972,16 @@ static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, u8 *mac, - struct rtw89_vif_link *rtwvif_link) + struct rtw89_vif_link *rtwvif_link, + bool designated) { struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam; char *p = buf, *end = buf + bufsz; p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id, rtwvif_link->mac_addr); - p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id); + p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwvif_link->link_id, + designated ? 
" (*)" : ""); p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx); p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam); @@ -4017,15 +3998,19 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif) (struct rtw89_debugfs_iter_data *)data; struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_dev *rtwdev = rtwvif->rtwdev; + struct rtw89_vif_link *designated_link; struct rtw89_vif_link *rtwvif_link; size_t bufsz = iter_data->bufsz; char *buf = iter_data->buf; char *p = buf, *end = buf + bufsz; unsigned int link_id; + designated_link = rtw89_get_designated_link(rtwvif); + p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr); rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) - p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link); + p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link, + rtwvif_link == designated_link); rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf); } @@ -4055,7 +4040,8 @@ static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev, static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, - struct rtw89_sta_link *rtwsta_link) + struct rtw89_sta_link *rtwsta_link, + bool designated) { struct ieee80211_link_sta *link_sta; char *p = buf, *end = buf + bufsz; @@ -4069,7 +4055,8 @@ static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev, rcu_read_unlock(); - p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id); + p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwsta_link->link_id, + designated ? " (*)" : ""); p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam); p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link); @@ -4082,16 +4069,20 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta) (struct rtw89_debugfs_iter_data *)data; struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_dev *rtwdev = rtwsta->rtwdev; + struct rtw89_sta_link *designated_link; struct rtw89_sta_link *rtwsta_link; size_t bufsz = iter_data->bufsz; char *buf = iter_data->buf; char *p = buf, *end = buf + bufsz; unsigned int link_id; + designated_link = rtw89_get_designated_link(rtwsta); + p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr, sta->tdls ? 
"(TDLS)" : ""); rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) - p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link); + p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link, + rtwsta_link == designated_link); rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf); } @@ -4146,6 +4137,35 @@ static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev, return p - buf; } +static void rtw89_debug_disable_dm_cfg_bmap(struct rtw89_dev *rtwdev, u32 new) +{ + struct rtw89_hal *hal = &rtwdev->hal; + u32 old = hal->disabled_dm_bitmap; + + if (new == old) + return; + + hal->disabled_dm_bitmap = new; + + rtw89_debug(rtwdev, RTW89_DBG_STATE, "Disable DM: 0x%x -> 0x%x\n", old, new); +} + +static void rtw89_debug_disable_dm_set_flag(struct rtw89_dev *rtwdev, u8 flag) +{ + struct rtw89_hal *hal = &rtwdev->hal; + u32 cur = hal->disabled_dm_bitmap; + + rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur | BIT(flag)); +} + +static void rtw89_debug_disable_dm_clr_flag(struct rtw89_dev *rtwdev, u8 flag) +{ + struct rtw89_hal *hal = &rtwdev->hal; + u32 cur = hal->disabled_dm_bitmap; + + rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur & ~BIT(flag)); +} + #define DM_INFO(type) {RTW89_DM_ ## type, #type} static const struct rtw89_disabled_dm_info { @@ -4155,6 +4175,7 @@ static const struct rtw89_disabled_dm_info { DM_INFO(DYNAMIC_EDCCA), DM_INFO(THERMAL_PROTECT), DM_INFO(TAS), + DM_INFO(MLO), }; static ssize_t @@ -4188,7 +4209,6 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev, struct rtw89_debugfs_priv *debugfs_priv, const char *buf, size_t count) { - struct rtw89_hal *hal = &rtwdev->hal; u32 conf; int ret; @@ -4196,7 +4216,83 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev, if (ret) return -EINVAL; - hal->disabled_dm_bitmap = conf; + rtw89_debug_disable_dm_cfg_bmap(rtwdev, conf); + + return count; +} + +static void rtw89_debug_mlo_mode_set_mlsr(struct rtw89_dev *rtwdev, + unsigned int link_id) +{ + struct ieee80211_vif *vif; + struct rtw89_vif *rtwvif; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + vif = rtwvif_to_vif(rtwvif); + if (!ieee80211_vif_is_mld(vif)) + continue; + + rtw89_core_mlsr_switch(rtwdev, rtwvif, link_id); + } +} + +static ssize_t +rtw89_debug_priv_mlo_mode_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) +{ + bool mlo_dm_dis = rtwdev->hal.disabled_dm_bitmap & BIT(RTW89_DM_MLO); + char *p = buf, *end = buf + bufsz; + struct ieee80211_vif *vif; + struct rtw89_vif *rtwvif; + int count = 0; + + p += scnprintf(p, end - p, "MLD(s) status: (MLO DM: %s)\n", + str_disable_enable(mlo_dm_dis)); + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + vif = rtwvif_to_vif(rtwvif); + if (!ieee80211_vif_is_mld(vif)) + continue; + + p += scnprintf(p, end - p, + "\t#%u: MLO mode %x, valid 0x%x, active 0x%x\n", + count++, rtwvif->mlo_mode, vif->valid_links, + vif->active_links); + } + + if (count == 0) + p += scnprintf(p, end - p, "\t(None)\n"); + + return p - buf; +} + +static ssize_t +rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) +{ + u8 num, mlo_mode; + u32 argv; + + num = sscanf(buf, "%hhx %u", &mlo_mode, &argv); + if (num != 2) + return -EINVAL; + + rtw89_debug_disable_dm_set_flag(rtwdev, RTW89_DM_MLO); + + rtw89_debug(rtwdev, RTW89_DBG_STATE, "Set MLO mode to %x\n", mlo_mode); + + switch (mlo_mode) { + case RTW89_MLO_MODE_MLSR: + rtw89_debug_mlo_mode_set_mlsr(rtwdev, argv); + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_STATE, 
"Unsupported MLO mode\n"); + rtw89_debug_disable_dm_clr_flag(rtwdev, RTW89_DM_MLO); + + return -EOPNOTSUPP; + } return count; } @@ -4257,7 +4353,8 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = { .fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK), .phy_info = rtw89_debug_priv_get(phy_info), .stations = rtw89_debug_priv_get(stations, RLOCK), - .disable_dm = rtw89_debug_priv_set_and_get(disable_dm), + .disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK), + .mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK), }; #define rtw89_debugfs_add(name, mode, fopname, parent) \ @@ -4302,6 +4399,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top rtw89_debugfs_add_r(phy_info); rtw89_debugfs_add_r(stations); rtw89_debugfs_add_rw(disable_dm); + rtw89_debugfs_add_rw(mlo_mode); } void rtw89_debugfs_init(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 8643b17866f8..00b65b2995cf 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -15,6 +15,8 @@ #include "util.h" #include "wow.h" +static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev); + struct rtw89_eapol_2_of_2 { u8 gtkbody[14]; u8 key_des_ver; @@ -554,7 +556,7 @@ const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev, if (sizeof(*mfw_hdr) > firmware->size) return NULL; - mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data; + mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0]; if (mfw_hdr->sig != RTW89_MFW_SIG) return NULL; @@ -850,6 +852,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE), + __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1), }; static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, @@ -1018,7 +1021,7 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, } n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); - regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); + regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL); if (!regs) goto out; @@ -1298,6 +1301,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = { rtw89_fw_recognize_txpwr_from_elm, { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL, + }, [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { rtw89_fw_recognize_txpwr_from_elm, { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, @@ -1310,6 +1325,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = { rtw89_fw_recognize_txpwr_from_elm, { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = { 
+ rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL, + }, + [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = { + rtw89_fw_recognize_txpwr_from_elm, + { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL, + }, [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { rtw89_fw_recognize_txpwr_from_elm, { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, @@ -2483,7 +2510,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) if (enable) comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | - BIT(RTW89_FW_LOG_COMP_SCAN); + BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN); skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); if (!skb) { @@ -3065,8 +3092,8 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, ntx_path = RF_A; map_b = 0; } else { - ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; - map_b = hal->antenna_tx == RF_AB ? 1 : 0; + ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; + map_b = ntx_path == RF_AB ? 1 : 0; } SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); @@ -4004,8 +4031,9 @@ out: int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link, bool dis_conn) { - struct sk_buff *skb; u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; + struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); + bool is_mld = ieee80211_vif_is_mld(vif); u8 self_role = rtwvif_link->self_role; enum rtw89_fw_sta_type sta_type; u8 net_type = rtwvif_link->net_type; @@ -4013,6 +4041,9 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv struct rtw89_h2c_join *h2c; u32 len = sizeof(*h2c); bool format_v1 = false; + struct sk_buff *skb; + u8 main_mac_id; + bool init_ps; int ret; if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { @@ -4054,8 +4085,28 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); + init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif); + + if (rtwsta_link) + main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta); + else + main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif); + + h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) | + le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) | + le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) | + le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR, + RTW89_H2C_JOININFO_W1_MLO_MODE) | + le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) | + le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) | + le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) | + le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US, + RTW89_H2C_JOININFO_W1_EMLSR_PADDING) | + le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US, + RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) | + le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) | + le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT); - h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); h2c_v1->w2 = 0; done: @@ -4339,6 +4390,7 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, struct rtw89_h2c_bcnfltr *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; + u8 max_cnt, cnt; int ret; if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) @@ 
-4367,12 +4419,20 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_h2c_bcnfltr *)skb->data; + if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw)) + max_cnt = BIT(7) - 1; + else + max_cnt = BIT(4) - 1; + + cnt = min(RTW89_BCN_LOSS_CNT, max_cnt); + h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, RTW89_H2C_BCNFLTR_W0_MODE) | - le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | + le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) | + le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) | le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | le32_encode_bits(thold + MAX_RSSI, RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | @@ -5298,12 +5358,12 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, } static -int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, - struct list_head *chan_list) +int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num, + struct list_head *chan_list) { struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; struct rtw89_h2c_chinfo_elem *elem; - struct rtw89_mac_chinfo *ch_info; + struct rtw89_mac_chinfo_ax *ch_info; struct rtw89_h2c_chinfo *h2c; struct sk_buff *skb; unsigned int cond; @@ -5477,7 +5537,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, return 0; } -#define RTW89_SCAN_DELAY_TSF_UNIT 104800 +#define RTW89_SCAN_DELAY_TSF_UNIT 1000000 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, struct rtw89_scan_option *option, struct rtw89_vif_link *rtwvif_link, @@ -5741,7 +5801,7 @@ flex_member: RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | - le32_encode_bits(2, + le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 
1 : 2, RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, @@ -6525,7 +6585,7 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) rtw89_fw_prog_cnt_dump(rtwdev); } -static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) +static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev) { struct list_head *pkt_list = rtwdev->scan_info.pkt_list; struct rtw89_pktofld_info *info, *tmp; @@ -6544,6 +6604,23 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) } } +static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; + + mac->free_chan_list(rtwdev); + rtw89_hw_scan_release_pkt_list(rtwdev); + + rtwvif->scan_req = NULL; + rtwvif->scan_ies = NULL; + scan_info->scanning_vif = NULL; + scan_info->abort = false; + scan_info->connected = false; +} + static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, struct cfg80211_scan_request *req, struct rtw89_pktofld_info *info, @@ -6612,7 +6689,8 @@ out: } static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link) + struct rtw89_vif_link *rtwvif_link, + const u8 *mac_addr) { struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; struct cfg80211_scan_request *req = rtwvif->scan_req; @@ -6621,7 +6699,7 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, int ret; for (i = 0; i < num; i++) { - skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, + skb = ieee80211_probereq_get(rtwdev->hw, mac_addr, req->ssids[i].ssid, req->ssids[i].ssid_len, req->ie_len); @@ -6638,10 +6716,10 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, return 0; } -static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, - struct ieee80211_scan_ies *ies, - struct cfg80211_scan_request *req, - struct rtw89_mac_chinfo *ch_info) +static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev, + struct ieee80211_scan_ies *ies, + struct cfg80211_scan_request *req, + struct rtw89_mac_chinfo_ax *ch_info) { struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; struct list_head *pkt_list = rtwdev->scan_info.pkt_list; @@ -6713,7 +6791,7 @@ out: static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type, int ssid_num, - struct rtw89_mac_chinfo *ch_info) + struct rtw89_mac_chinfo_ax *ch_info) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_pktofld_info *info; @@ -6761,9 +6839,9 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, } } -static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, - int ssid_num, - struct rtw89_mac_chinfo *ch_info) +static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type, + int ssid_num, + struct rtw89_mac_chinfo_ax *ch_info) { struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; @@ -6794,7 +6872,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, } } - ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); + ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info); if (ret) rtw89_warn(rtwdev, "RNR fails: %d\n", ret); @@ -6950,7 +7028,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct 
cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; - struct rtw89_mac_chinfo *ch_info, *tmp; + struct rtw89_mac_chinfo_ax *ch_info, *tmp; struct ieee80211_channel *channel; struct list_head chan_list; int list_len; @@ -6984,7 +7062,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); list_add_tail(&ch_info->list, &chan_list); } - ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); + ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list); out: list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { @@ -6995,24 +7073,24 @@ out: return ret; } -int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected) +int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) { + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; struct cfg80211_scan_request *req = rtwvif->scan_req; - struct rtw89_mac_chinfo *ch_info, *tmp; + struct rtw89_mac_chinfo_ax *ch_info, *tmp; struct ieee80211_channel *channel; struct list_head chan_list; bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; - int list_len, off_chan_time = 0; enum rtw89_chan_type type; - int ret = 0; + int off_chan_time = 0; + int ret; u32 idx; INIT_LIST_HEAD(&chan_list); - for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; - idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; - idx++, list_len++) { + + for (idx = 0; idx < req->n_channels; idx++) { channel = req->channels[idx]; ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); if (!ch_info) { @@ -7039,9 +7117,9 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, type = RTW89_CHAN_DFS; else type = RTW89_CHAN_ACTIVE; - rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); + rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info); - if (connected && + if (scan_info->connected && off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { @@ -7053,16 +7131,16 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, type = RTW89_CHAN_OPERATE; tmp->period = req->duration_mandatory ? 
req->duration : RTW89_CHANNEL_TIME; - rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); + rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp); list_add_tail(&tmp->list, &chan_list); off_chan_time = 0; - list_len++; } list_add_tail(&ch_info->list, &chan_list); off_chan_time += ch_info->period; } - rtwdev->scan_info.last_chan_idx = idx; - ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); + + list_splice_tail(&chan_list, &scan_info->chan_list); + return 0; out: list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { @@ -7073,6 +7151,46 @@ out: return ret; } +void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev) +{ + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw89_mac_chinfo_ax *ch_info, *tmp; + + list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } +} + +int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw89_mac_chinfo_ax *ch_info, *tmp; + unsigned int list_len = 0; + struct list_head list; + int ret; + + INIT_LIST_HEAD(&list); + + list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { + list_move_tail(&ch_info->list, &list); + + list_len++; + if (list_len == RTW89_SCAN_LIST_LIMIT_AX) + break; + } + + ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list); + + list_for_each_entry_safe(ch_info, tmp, &list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } + + return ret; +} + int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) { @@ -7126,25 +7244,24 @@ out: return ret; } -int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected) +int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) { + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; struct cfg80211_scan_request *req = rtwvif->scan_req; struct rtw89_mac_chinfo_be *ch_info, *tmp; struct ieee80211_channel *channel; struct list_head chan_list; enum rtw89_chan_type type; - int list_len, ret; bool random_seq; + int ret; u32 idx; random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); INIT_LIST_HEAD(&chan_list); - for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; - idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; - idx++, list_len++) { + for (idx = 0; idx < req->n_channels; idx++) { channel = req->channels[idx]; ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); if (!ch_info) { @@ -7174,9 +7291,8 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, list_add_tail(&ch_info->list, &chan_list); } - rtwdev->scan_info.last_chan_idx = idx; - ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, - rtwvif_link); + list_splice_tail(&chan_list, &scan_info->chan_list); + return 0; out: list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { @@ -7187,51 +7303,182 @@ out: return ret; } +void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev) +{ + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw89_mac_chinfo_be *ch_info, *tmp; + + list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } +} + +int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 
+ struct rtw89_mac_chinfo_be *ch_info, *tmp; + unsigned int list_len = 0; + struct list_head list; + int ret; + + INIT_LIST_HEAD(&list); + + list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { + list_move_tail(&ch_info->list, &list); + + list_len++; + if (list_len == RTW89_SCAN_LIST_LIMIT_BE) + break; + } + + ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list, + rtwvif_link); + + list_for_each_entry_safe(ch_info, tmp, &list, list) { + list_del(&ch_info->list); + kfree(ch_info); + } + + return ret; +} + static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected) + struct rtw89_vif_link *rtwvif_link, + const u8 *mac_addr) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; int ret; - ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); + ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr); if (ret) { rtw89_err(rtwdev, "Update probe request failed\n"); goto out; } - ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); + ret = mac->prep_chan_list(rtwdev, rtwvif_link); out: return ret; } -void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, - struct ieee80211_scan_request *scan_req) +static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link, + u16 tu) +{ + struct ieee80211_p2p_noa_desc noa_desc = {}; + u64 tsf; + int ret; + + ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); + if (ret) { + rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); + return; + } + + noa_desc.start_time = cpu_to_le32(tsf); + noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu)); + noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu)); + noa_desc.count = 1; + + rtw89_p2p_noa_renew(rtwvif_link); + rtw89_p2p_noa_append(rtwvif_link, &noa_desc); + rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); +} + +static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, + const struct cfg80211_scan_request *req) +{ + const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; + const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_mac_chinfo_ax *chinfo_ax; + struct rtw89_mac_chinfo_be *chinfo_be; + struct rtw89_vif_link *rtwvif_link; + struct list_head *pos, *tmp; + struct ieee80211_vif *vif; + struct rtw89_vif *rtwvif; + u16 tu = 0; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + list_for_each_safe(pos, tmp, &scan_info->chan_list) { + switch (chip->chip_gen) { + case RTW89_CHIP_AX: + chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list); + tu += chinfo_ax->period; + break; + case RTW89_CHIP_BE: + chinfo_be = list_entry(pos, typeof(*chinfo_be), list); + tu += chinfo_be->period; + break; + default: + rtw89_warn(rtwdev, "%s: invalid chip gen %d\n", + __func__, chip->chip_gen); + return; + } + } + + if (unlikely(tu == 0)) { + rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, + "%s: cannot estimate needed TU\n", __func__); + return; + } + + list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) { + unsigned int link_id; + + vif = rtwvif_to_vif(rtwvif); + if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) + continue; + + rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) + rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu); + } +} + +int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link, + struct ieee80211_scan_request *scan_req) { const struct rtw89_mac_gen_def *mac = 
rtwdev->chip->mac_def; + enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); struct cfg80211_scan_request *req = &scan_req->req; const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; + struct rtw89_chanctx_pause_parm pause_parm = { + .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, + .trigger = rtwvif_link, + }; u32 rx_fltr = rtwdev->hal.rx_fltr; u8 mac_addr[ETH_ALEN]; u32 reg; + int ret; /* clone op and keep it during scan */ rtwdev->scan_info.op_chan = *chan; + rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); rtwdev->scan_info.scanning_vif = rtwvif_link; - rtwdev->scan_info.last_chan_idx = 0; rtwdev->scan_info.abort = false; rtwvif->scan_ies = &scan_req->ies; rtwvif->scan_req = req; - ieee80211_stop_queues(rtwdev->hw); - rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(mac_addr, req->mac_addr, req->mac_addr_mask); else ether_addr_copy(mac_addr, rtwvif_link->mac_addr); + + ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr); + if (ret) { + rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); + return ret; + } + + ieee80211_stop_queues(rtwdev->hw); + rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); + rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); rx_fltr &= ~B_AX_A_BCN_CHK_EN; @@ -7241,7 +7488,12 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr); - rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); + rtw89_chanctx_pause(rtwdev, &pause_parm); + + if (mode == RTW89_ENTITY_MODE_MCC) + rtw89_hw_scan_update_beacon_noa(rtwdev, req); + + return 0; } struct rtw89_hw_scan_complete_cb_data { @@ -7252,20 +7504,16 @@ struct rtw89_hw_scan_complete_cb_data { static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; - struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_hw_scan_complete_cb_data *cb_data = data; struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link; struct cfg80211_scan_info info = { .aborted = cb_data->aborted, }; - struct rtw89_vif *rtwvif; u32 reg; if (!rtwvif_link) return -EINVAL; - rtwvif = rtwvif_link->rtwvif; - reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); @@ -7275,12 +7523,7 @@ static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); - rtw89_release_pkt_list(rtwdev); - rtwvif->scan_req = NULL; - rtwvif->scan_ies = NULL; - scan_info->last_chan_idx = 0; - scan_info->scanning_vif = NULL; - scan_info->abort = false; + rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); return 0; } @@ -7355,11 +7598,11 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, if (!rtwvif_link) return -EINVAL; - connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); + connected = rtwdev->scan_info.connected; opt.enable = enable; opt.target_ch_mode = connected; if (enable) { - ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); + ret = mac->add_chan_list(rtwdev, rtwvif_link); if (ret) goto out; } @@ -8719,6 +8962,47 @@ int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) return 0; } +int 
rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, + bool enable) +{ + struct rtw89_wait_info *wait = &rtwdev->mlo.wait; + struct rtw89_h2c_mlo_link_cfg *h2c; + u8 mac_id = rtwvif_link->mac_id; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + unsigned int cond; + int ret; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n"); + return -ENOMEM; + } + + skb_put(skb, len); + h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data; + + h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) | + le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MLO, + H2C_FUNC_MLO_LINK_CFG, 0, 0, + len); + + cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG); + + ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); + if (ret) { + rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n", + str_enable_disable(enable), rtwvif_link->link_id, ret); + return ret; + } + + return 0; +} + static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) { static const u8 zeros[U8_MAX] = {}; @@ -9106,6 +9390,26 @@ void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) } } +static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev, + const struct rtw89_rfe_parms *parms) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->support_bands & BIT(NL80211_BAND_2GHZ) && + !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru)) + return false; + + if (chip->support_bands & BIT(NL80211_BAND_5GHZ) && + !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru)) + return false; + + if (chip->support_bands & BIT(NL80211_BAND_6GHZ) && + !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru)) + return false; + + return true; +} + const struct rtw89_rfe_parms * rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, const struct rtw89_rfe_parms *init) @@ -9142,6 +9446,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; } + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz); + parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz); + parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz); + parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v; + } + if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; @@ -9157,6 +9476,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; } + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz); + parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz); + parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v; + } + + if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) { + rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz); + parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v; + } + if 
(rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; @@ -9167,5 +9501,7 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; } + parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms); + return parms; } diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 55255b48bdb7..0fcc824e41be 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -199,6 +199,7 @@ enum rtw89_fw_log_comp { RTW89_FW_LOG_COMP_TWT, RTW89_FW_LOG_COMP_RF, RTW89_FW_LOG_COMP_MCC = 20, + RTW89_FW_LOG_COMP_MLO = 26, RTW89_FW_LOG_COMP_SCAN = 28, }; @@ -333,9 +334,9 @@ struct rtw89_fw_macid_pause_sleep_grp { #define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE) #define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE) -#define RTW89_BCN_LOSS_CNT 10 +#define RTW89_BCN_LOSS_CNT 60 -struct rtw89_mac_chinfo { +struct rtw89_mac_chinfo_ax { u8 period; u8 dwell_time; u8 central_ch; @@ -1636,6 +1637,8 @@ struct rtw89_h2c_join_v1 { #define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3) #define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4) #define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12) +#define RTW89_H2C_JOININFO_MLO_MODE_MLMR 0 +#define RTW89_H2C_JOININFO_MLO_MODE_MLSR 1 #define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13) #define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14) #define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15) @@ -2863,6 +2866,13 @@ struct rtw89_h2c_fwips { #define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0) #define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8) +struct rtw89_h2c_mlo_link_cfg { + __le32 w0; +}; + +#define RTW89_H2C_MLO_LINK_CFG_W0_MACID GENMASK(15, 0) +#define RTW89_H2C_MLO_LINK_CFG_W0_OPTION GENMASK(19, 16) + static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val) { le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0)); @@ -3562,6 +3572,7 @@ struct rtw89_c2h_done_ack { #define RTW89_C2H_DONE_ACK_W2_CLASS GENMASK(7, 2) #define RTW89_C2H_DONE_ACK_W2_FUNC GENMASK(15, 8) #define RTW89_C2H_DONE_ACK_W2_H2C_RETURN GENMASK(23, 16) +#define RTW89_C2H_SCAN_DONE_ACK_RETURN GENMASK(5, 0) #define RTW89_C2H_DONE_ACK_W2_H2C_SEQ GENMASK(31, 24) #define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \ @@ -3620,6 +3631,19 @@ struct rtw89_c2h_ra_rpt { #define RTW89_C2H_RA_RPT_W3_MD_SEL_B2 BIT(15) #define RTW89_C2H_RA_RPT_W3_BW_B2 BIT(16) +struct rtw89_c2h_fw_scan_rpt { + struct rtw89_c2h_hdr hdr; + u8 phy_idx; + u8 band; + u8 center_ch; + u8 ofdm_pd_idx; /* in unit of 2 dBm */ +#define PD_LOWER_BOUND_BASE 102 + s8 cck_pd_idx; + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; +} __packed; + /* For WiFi 6 chips: * VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS * HT-new: [6:5]: NA, [4:0]: MCS @@ -3717,6 +3741,25 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE) #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0)) +struct rtw89_c2h_mlo_link_cfg_rpt { + struct rtw89_c2h_hdr hdr; + __le32 w2; +} __packed; + +#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID GENMASK(15, 0) +#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS GENMASK(19, 16) + +enum rtw89_c2h_mlo_link_status { + RTW89_C2H_MLO_LINK_CFG_IDLE = 0, + RTW89_C2H_MLO_LINK_CFG_DONE = 1, + RTW89_C2H_MLO_LINK_CFG_ISSUE_NULL_FAIL = 2, + RTW89_C2H_MLO_LINK_CFG_TX_NULL_FAIL = 3, + RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST = 4, + 
RTW89_C2H_MLO_LINK_CFG_NULL_1_TIMEOUT = 5, + RTW89_C2H_MLO_LINK_CFG_NULL_0_TIMEOUT = 6, + RTW89_C2H_MLO_LINK_CFG_RUNNING = 0xff, +}; + struct rtw89_mac_mrc_tsf_rpt { unsigned int num; u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM]; @@ -3813,7 +3856,8 @@ struct rtw89_h2c_bcnfltr { #define RTW89_H2C_BCNFLTR_W0_MON_BCN BIT(1) #define RTW89_H2C_BCNFLTR_W0_MON_EN BIT(2) #define RTW89_H2C_BCNFLTR_W0_MODE GENMASK(4, 3) -#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT GENMASK(11, 8) +#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3 GENMASK(7, 5) +#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4 GENMASK(11, 8) #define RTW89_H2C_BCNFLTR_W0_RSSI_HYST GENMASK(15, 12) #define RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD GENMASK(23, 16) #define RTW89_H2C_BCNFLTR_W0_MAC_ID GENMASK(31, 24) @@ -3891,6 +3935,12 @@ enum rtw89_fw_element_id { RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18, RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19, RTW89_FW_ELEMENT_ID_REGD = 20, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ = 21, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ = 22, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ = 23, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25, + RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26, RTW89_FW_ELEMENT_ID_NUM, }; @@ -4229,6 +4279,26 @@ enum rtw89_mcc_h2c_func { #define RTW89_MCC_WAIT_COND(group, func) \ ((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func)) +/* CLASS 20 - MLO */ +#define H2C_CL_MLO 0x14 +enum rtw89_mlo_h2c_func { + H2C_FUNC_MLO_TBL_CFG = 0x0, + H2C_FUNC_MLO_STA_CFG = 0x1, + H2C_FUNC_MLO_TTLM = 0x2, + H2C_FUNC_MLO_DM_CFG = 0x3, + H2C_FUNC_MLO_EMLSR_STA_CFG = 0x4, + H2C_FUNC_MLO_MCMLO_RELINK_DROP = 0x5, + H2C_FUNC_MLO_MCMLO_SN_SYNC = 0x6, + H2C_FUNC_MLO_RELINK = 0x7, + H2C_FUNC_MLO_LINK_CFG = 0x8, + H2C_FUNC_MLO_DM_DBG = 0x9, + + NUM_OF_RTW89_MLO_H2C_FUNC, +}; + +#define RTW89_MLO_WAIT_COND(macid, func) \ + ((macid) * NUM_OF_RTW89_MLO_H2C_FUNC + (func)) + /* CLASS 24 - MRC */ #define H2C_CL_MRC 0x18 enum rtw89_mrc_h2c_func { @@ -4715,9 +4785,9 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, struct rtw89_mac_c2h_info *c2h_info); int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable); void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev); -void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, - struct ieee80211_scan_request *scan_req); +int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link, + struct ieee80211_scan_request *scan_req); void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool aborted); @@ -4726,12 +4796,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, bool enable); void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); +int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link); +void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev); int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected); + struct rtw89_vif_link *rtwvif_link); int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); +int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link); +void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev); int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected); + struct rtw89_vif_link *rtwvif_link); int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, struct 
rtw89_vif_link *rtwvif_link); int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev); @@ -4800,6 +4876,8 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, const struct rtw89_fw_mrc_upd_duration_arg *arg); int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en); +int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, + bool enable); static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index b4841f948ec1..9f0e30e75009 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -4631,11 +4631,17 @@ static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev, if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif_link == rtwvif_src) return; + if (rtwvif_link->rand_tsf_done) + goto out; + /* adjust offset randomly to avoid beacon conflict */ offset = offset - offset / 4 + get_random_u32() % (offset / 2); rtw89_mac_port_tsf_sync(rtwdev, rtwvif_link, rtwvif_src, (*n_offset) * offset); + rtwvif_link->rand_tsf_done = true; + +out: (*n_offset)++; } @@ -4866,6 +4872,8 @@ void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) { rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link); + + rtwvif_link->rand_tsf_done = false; } int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) @@ -4899,11 +4907,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; struct rtw89_vif *rtwvif; struct rtw89_chan new; - u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf; u16 actual_period, expect_period; u8 reason, status, tx_fail, band; u8 mac_idx, sw_def, fw_def; u8 ver = U8_MAX; + u32 report_tsf; u16 chan; int ret; @@ -4962,7 +4970,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, return; if (rtwvif_link && rtwvif->scan_req && - last_chan < rtwvif->scan_req->n_channels) { + !list_empty(&rtwdev->scan_info.chan_list)) { ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true); if (ret) { rtw89_hw_scan_abort(rtwdev, rtwvif_link); @@ -5017,7 +5025,8 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l switch (type) { case RTW89_BCN_FLTR_BEACON_LOSS: - if (!rtwdev->scanning && !rtwvif->offchan) + if (!rtwdev->scanning && !rtwvif->offchan && + !rtwvif_link->noa_once.in_duration) ieee80211_connection_loss(vif); else rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); @@ -5110,12 +5119,14 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le return; case H2C_FUNC_ADD_SCANOFLD_CH: cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; + h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN; break; case H2C_FUNC_SCANOFLD: cond = RTW89_SCANOFLD_WAIT_COND_START; break; case H2C_FUNC_SCANOFLD_BE: cond = RTW89_SCANOFLD_BE_WAIT_COND_START; + h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN; break; } @@ -5399,6 +5410,27 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le } static void +rtw89_mac_c2h_mlo_link_cfg_stat(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + const struct rtw89_c2h_mlo_link_cfg_rpt *c2h_rpt; + struct rtw89_wait_info *wait = &rtwdev->mlo.wait; + struct rtw89_completion_data data = {}; + unsigned int cond; + u16 
mac_id; + u8 status; + + c2h_rpt = (const struct rtw89_c2h_mlo_link_cfg_rpt *)c2h->data; + + mac_id = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID); + status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS); + + data.err = status == RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST || + status == RTW89_C2H_MLO_LINK_CFG_RUNNING; + cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG); + rtw89_complete_cond(wait, cond, &data); +} + +static void rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { struct rtw89_wait_info *wait = &rtwdev->mcc.wait; @@ -5553,6 +5585,18 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev, }; static +void (* const rtw89_mac_c2h_mlo_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_MAC_C2H_FUNC_MLO_GET_TBL] = NULL, + [RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE] = NULL, + [RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE] = NULL, + [RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT] = NULL, + [RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT] = NULL, + [RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT] = rtw89_mac_c2h_mlo_link_cfg_stat, + [RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP] = NULL, +}; + +static void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) = { [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt, @@ -5621,6 +5665,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h, } case RTW89_MAC_C2H_CLASS_MCC: return true; + case RTW89_MAC_C2H_CLASS_MLO: + return true; case RTW89_MAC_C2H_CLASS_MRC: return true; case RTW89_MAC_C2H_CLASS_WOW: @@ -5654,6 +5700,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC) handler = rtw89_mac_c2h_mcc_handler[func]; break; + case RTW89_MAC_C2H_CLASS_MLO: + if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MLO) + handler = rtw89_mac_c2h_mlo_handler[func]; + break; case RTW89_MAC_C2H_CLASS_MRC: if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC) handler = rtw89_mac_c2h_mrc_handler[func]; @@ -6889,6 +6939,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .is_txq_empty = mac_is_txq_empty_ax, + .prep_chan_list = rtw89_hw_scan_prep_chan_list_ax, + .free_chan_list = rtw89_hw_scan_free_chan_list_ax, .add_chan_list = rtw89_hw_scan_add_chan_list_ax, .add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax, .scan_offload = rtw89_fw_h2c_scan_offload_ax, diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index fd7935d24501..8013c852d5be 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -370,6 +370,7 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_TXD_FIFO_0_V1, RTW89_MAC_MEM_TXD_FIFO_1_V1, RTW89_MAC_MEM_WD_PAGE, + RTW89_MAC_MEM_MLD_TBL, /* keep last */ RTW89_MAC_MEM_NUM, @@ -427,6 +428,18 @@ enum rtw89_mac_c2h_mcc_func { NUM_OF_RTW89_MAC_C2H_FUNC_MCC, }; +enum rtw89_mac_c2h_mlo_func { + RTW89_MAC_C2H_FUNC_MLO_GET_TBL = 0x0, + RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE = 0x1, + RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE = 0x2, + RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT = 0x3, + RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT = 0x4, + RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT = 0x5, + RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP = 0x6, + + NUM_OF_RTW89_MAC_C2H_FUNC_MLO, +}; + enum rtw89_mac_c2h_mrc_func { RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0, RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1, @@ -453,6 +466,7 @@ enum rtw89_mac_c2h_class { RTW89_MAC_C2H_CLASS_WOW = 0x3, RTW89_MAC_C2H_CLASS_MCC = 0x4, 
RTW89_MAC_C2H_CLASS_FWDBG = 0x5, + RTW89_MAC_C2H_CLASS_MLO = 0xc, RTW89_MAC_C2H_CLASS_MRC = 0xe, RTW89_MAC_C2H_CLASS_AP = 0x18, RTW89_MAC_C2H_CLASS_MAX, @@ -1031,8 +1045,11 @@ struct rtw89_mac_gen_def { bool (*is_txq_empty)(struct rtw89_dev *rtwdev); + int (*prep_chan_list)(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link); + void (*free_chan_list)(struct rtw89_dev *rtwdev); int (*add_chan_list)(struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link, bool connected); + struct rtw89_vif_link *rtwvif_link); int (*add_chan_list_pno)(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); int (*scan_offload)(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 4fded07d0bee..a47971003bd4 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -114,11 +114,14 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev, wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work); INIT_LIST_HEAD(&rtwvif_link->general_pkt_list); + rtw89_p2p_noa_once_init(rtwvif_link); + rtwvif_link->hit_rule = 0; rtwvif_link->bcn_hit_cond = 0; rtwvif_link->chanctx_assigned = false; rtwvif_link->chanctx_idx = RTW89_CHANCTX_0; rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; + rtwvif_link->rand_tsf_done = false; rcu_read_lock(); @@ -142,6 +145,8 @@ static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev, wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work); + rtw89_p2p_noa_once_deinit(rtwvif_link); + rtw89_leave_ps_mode(rtwdev); rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, NULL, BTC_ROLE_STOP); @@ -186,6 +191,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) { list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); INIT_LIST_HEAD(&rtwvif->mgnt_entry); + INIT_LIST_HEAD(&rtwvif->dlink_pool); } ether_addr_copy(rtwvif->mac_addr, vif->addr); @@ -479,6 +485,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev, int i; if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { + rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR; + /* for station mode, assign the mac_id from itself */ macid = rtw89_vif_get_main_macid(rtwvif); } else { @@ -494,6 +502,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw89_core_txq_init(rtwdev, sta->txq[i]); + INIT_LIST_HEAD(&rtwsta->dlink_pool); + skb_queue_head_init(&rtwsta->roc_queue); bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM); @@ -1018,7 +1028,7 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw, struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_sta_link *rtwsta_link; - rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0); + rtwsta_link = rtw89_get_designated_link(rtwsta); if (unlikely(!rtwsta_link)) return; @@ -1153,12 +1163,14 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw, lockdep_assert_wiphy(hw->wiphy); - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n"); + rtw89_err(rtwdev, "sw scan start: find no designated link\n"); return; } + rtw89_leave_lps(rtwdev); + rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false); } @@ -1171,9 +1183,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw, lockdep_assert_wiphy(hw->wiphy); - rtwvif_link = 
rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n"); + rtw89_err(rtwdev, "sw scan complete: find no designated link\n"); return; } @@ -1205,13 +1217,19 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (rtwdev->scanning || rtwvif->offchan) return -EBUSY; - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "hw scan: find no link on HW-0\n"); + rtw89_err(rtwdev, "hw scan: find no designated link\n"); return -ENOLINK; } - rtw89_hw_scan_start(rtwdev, rtwvif_link, req); + rtw89_leave_lps(rtwdev); + rtw89_leave_ips_by_hwflags(rtwdev); + + ret = rtw89_hw_scan_start(rtwdev, rtwvif_link, req); + if (ret) + return ret; + ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true); if (ret) { rtw89_hw_scan_abort(rtwdev, rtwvif_link); @@ -1236,9 +1254,9 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw, if (!rtwdev->scanning) return; - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n"); + rtw89_err(rtwdev, "cancel hw scan: find no designated link\n"); return; } diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 99b82dc85ea3..8c9d326dc907 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -29,6 +29,7 @@ static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR_BE, [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR_BE, [RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE, + [RTW89_MAC_MEM_MLD_TBL] = MLD_TBL_BASE_ADDR_BE, }; static const struct rtw89_port_reg rtw89_port_base_be = { @@ -2636,6 +2637,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = { .is_txq_empty = mac_is_txq_empty_be, + .prep_chan_list = rtw89_hw_scan_prep_chan_list_be, + .free_chan_list = rtw89_hw_scan_free_chan_list_be, .add_chan_list = rtw89_hw_scan_add_chan_list_be, .add_chan_list_pno = rtw89_pno_scan_add_chan_list_be, .scan_offload = rtw89_fw_h2c_scan_offload_be, diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index c2fe5a898dc7..064f6a940107 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -228,7 +228,7 @@ int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, struct sk_buff *skb) { struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); - int rx_tag_retry = 100; + int rx_tag_retry = 1000; int ret; do { @@ -3105,17 +3105,26 @@ static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) return false; } -static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) +static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + int ret; + u8 val; - if (!rtwpci->enable_dac) - return; + if (!rtwpci->enable_dac && !force) + return 0; if (!rtw89_pci_chip_is_manual_dac(rtwdev)) - return; + return 0; - rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); + /* Configure DAC only via PCI config API, not DBI interfaces */ + ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val); + if 
(ret) + return ret; + + val |= RTW89_PCIE_BIT_EN_64BITS; + return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val); } static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, @@ -3133,13 +3142,16 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, } if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) - goto no_dac; + goto try_dac_done; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); if (!ret) { - rtwpci->enable_dac = true; - rtw89_pci_cfg_dac(rtwdev); - } else { + ret = rtw89_pci_cfg_dac(rtwdev, true); + if (!ret) { + rtwpci->enable_dac = true; + goto try_dac_done; + } + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { rtw89_err(rtwdev, @@ -3147,7 +3159,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, goto err_release_regions; } } -no_dac: +try_dac_done: resource_len = pci_resource_len(pdev, bar_id); rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); @@ -4302,7 +4314,7 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume) { if (resume) - rtw89_pci_cfg_dac(rtwdev); + rtw89_pci_cfg_dac(rtwdev, false); rtw89_pci_disable_eq(rtwdev); rtw89_pci_filter_out(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index f4eee642e5ce..76a2e26d4a10 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2034,19 +2034,10 @@ static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev, ant_gain->offset[path][subband_h]); } -static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq) +static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq) { - struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; - const struct rtw89_chip_info *chip = rtwdev->chip; - u8 regd = rtw89_regd_get(rtwdev, band); s8 offset_patha, offset_pathb; - if (!chip->support_ant_gain) - return 0; - - if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd))) - return 0; - offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq); offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq); @@ -2056,18 +2047,31 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente return max(offset_patha, offset_pathb); } -s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan) +static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band) { + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; const struct rtw89_chip_info *chip = rtwdev->chip; - u8 regd = rtw89_regd_get(rtwdev, chan->band_type); - s8 offset_patha, offset_pathb; + u8 regd = rtw89_regd_get(rtwdev, band); if (!chip->support_ant_gain) - return 0; + return false; if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd))) + return false; + + if (!rfe_parms->has_da) + return false; + + return true; +} + +s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) +{ + s8 offset_patha, offset_pathb; + + if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) return 0; if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw)) @@ -2083,14 +2087,10 @@ EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset); int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, const struct rtw89_chan *chan) { - struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; - const struct 
rtw89_chip_info *chip = rtwdev->chip; - u8 regd = rtw89_regd_get(rtwdev, chan->band_type); char *p = buf, *end = buf + bufsz; s8 offset_patha, offset_pathb; - if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) || - ant_gain->block_country) { + if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) { p += scnprintf(p, end - p, "no DAG is applied\n"); goto out; } @@ -2255,20 +2255,31 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch) { const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; + const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz; + const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz; + const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz; const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band); u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); + s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0; u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; - s8 lmt = 0, sar, offset; + struct rtw89_sar_parm sar_parm = { + .center_freq = freq, + .ntx = ntx, + }; s8 cstr; switch (band) { case RTW89_BAND_2G: + if (has_ant_gain) + da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; + lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; if (lmt) break; @@ -2276,6 +2287,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: + if (has_ant_gain) + da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; + lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; if (lmt) break; @@ -2283,6 +2297,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; break; case RTW89_BAND_6G: + if (has_ant_gain) + da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx]; + lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx]; if (lmt) break; @@ -2296,9 +2313,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, return 0; } - offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq); - lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset); - sar = rtw89_query_sar(rtwdev, freq); + da_lmt = da_lmt ?: S8_MAX; + if (da_lmt != S8_MAX) + offset = rtw89_phy_ant_gain_offset(rtwdev, freq); + + lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt)); + sar = rtw89_query_sar(rtwdev, &sar_parm); cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); return min3(lmt, sar, cstr); @@ -2515,20 +2535,31 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, u8 ru, u8 ntx, u8 ch) { const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; + const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz; + const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz; + const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz; const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; const struct 
rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); + bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band); u32 freq = ieee80211_channel_to_frequency(ch, nl_band); u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); + s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0; u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; - s8 lmt_ru = 0, sar, offset; + struct rtw89_sar_parm sar_parm = { + .center_freq = freq, + .ntx = ntx, + }; s8 cstr; switch (band) { case RTW89_BAND_2G: + if (has_ant_gain) + da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx]; + lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx]; if (lmt_ru) break; @@ -2536,6 +2567,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: + if (has_ant_gain) + da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx]; + lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx]; if (lmt_ru) break; @@ -2543,6 +2577,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; break; case RTW89_BAND_6G: + if (has_ant_gain) + da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx]; + lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx]; if (lmt_ru) break; @@ -2556,9 +2593,12 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, return 0; } - offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq); - lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset); - sar = rtw89_query_sar(rtwdev, freq); + da_lmt_ru = da_lmt_ru ?: S8_MAX; + if (da_lmt_ru != S8_MAX) + offset = rtw89_phy_ant_gain_offset(rtwdev, freq); + + lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru)); + sar = rtw89_query_sar(rtwdev, &sar_parm); cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); return min3(lmt_ru, sar, cstr); @@ -3015,6 +3055,35 @@ void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev, [RTW89_PHY_C2H_FUNC_TXSTS] = NULL, }; +static void +rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ +} + +static void +rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) +{ + const struct rtw89_c2h_fw_scan_rpt *c2h_rpt = + (const struct rtw89_c2h_fw_scan_rpt *)c2h->data; + + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n", + __func__, c2h_rpt->band, c2h_rpt->center_ch, + PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1), + c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx); +} + +static +void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL, + [RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL, + [RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL, + [RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty, + [RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL, + [RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt, +}; + static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev, enum rtw89_phy_c2h_rfk_log_func func, void *content, u16 len) @@ -3550,9 +3619,9 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, handler = rtw89_phy_c2h_rfk_report_handler[func]; break; case RTW89_PHY_C2H_CLASS_DM: - if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY) - return; - fallthrough; + if 
(func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler)) + handler = rtw89_phy_c2h_dm_handler[func]; + break; default: rtw89_info(rtwdev, "PHY c2h class %d not support\n", class); return; diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 518a100375fb..5b451f1cfaac 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -164,6 +164,7 @@ enum rtw89_phy_c2h_dm_func { RTW89_PHY_C2H_DM_FUNC_SIGB, RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY, RTW89_PHY_C2H_DM_FUNC_MCC_DIG, + RTW89_PHY_C2H_DM_FUNC_FW_SCAN = 0xc, RTW89_PHY_C2H_DM_FUNC_NUM, }; @@ -935,6 +936,20 @@ static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm) return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63); } +static inline s16 rtw89_phy_txpwr_mac_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_mac) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return txpwr_mac << (chip->txpwr_factor_rf - chip->txpwr_factor_mac); +} + +static inline s16 rtw89_phy_txpwr_mac_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_mac) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return txpwr_mac << (chip->txpwr_factor_bb - chip->txpwr_factor_mac); +} + void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link); void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c index 37d8f254ae32..d321cf1fc485 100644 --- a/drivers/net/wireless/realtek/rtw89/phy_be.c +++ b/drivers/net/wireless/realtek/rtw89/phy_be.c @@ -362,7 +362,7 @@ static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev, rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0); rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0); addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx); - rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0); + rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ALL, 0); addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx); rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0); addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx); diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index ac46a7baa00d..8e4fe73e7d77 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -382,3 +382,150 @@ u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data) tail = ie->noa_desc + setter->noa_count; return tail - *data; } + +static void rtw89_ps_noa_once_set_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct rtw89_ps_noa_once_handler *noa_once = + container_of(work, struct rtw89_ps_noa_once_handler, set_work.work); + + lockdep_assert_wiphy(wiphy); + + noa_once->in_duration = true; +} + +static void rtw89_ps_noa_once_clr_work(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct rtw89_ps_noa_once_handler *noa_once = + container_of(work, struct rtw89_ps_noa_once_handler, clr_work.work); + struct rtw89_vif_link *rtwvif_link = + container_of(noa_once, struct rtw89_vif_link, noa_once); + struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev; + + lockdep_assert_wiphy(wiphy); + + rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); + noa_once->in_duration = false; +} + +void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_ps_noa_once_handler 
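/* clear the one-shot NoA state and (re)initialize the delayed works that mark entering and leaving the absence window */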
*noa_once = &rtwvif_link->noa_once; + + noa_once->in_duration = false; + noa_once->tsf_begin = 0; + noa_once->tsf_end = 0; + + wiphy_delayed_work_init(&noa_once->set_work, rtw89_ps_noa_once_set_work); + wiphy_delayed_work_init(&noa_once->clr_work, rtw89_ps_noa_once_clr_work); +} + +static void rtw89_p2p_noa_once_cancel(struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once; + struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev; + struct wiphy *wiphy = rtwdev->hw->wiphy; + + wiphy_delayed_work_cancel(wiphy, &noa_once->set_work); + wiphy_delayed_work_cancel(wiphy, &noa_once->clr_work); +} + +void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link) +{ + rtw89_p2p_noa_once_cancel(rtwvif_link); + rtw89_p2p_noa_once_init(rtwvif_link); +} + +void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once; + struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev; + const struct ieee80211_p2p_noa_desc *noa_desc; + struct wiphy *wiphy = rtwdev->hw->wiphy; + struct ieee80211_bss_conf *bss_conf; + u64 tsf_begin = U64_MAX, tsf_end; + u64 set_delay_us = 0; + u64 clr_delay_us = 0; + u32 start_time; + u32 interval; + u32 duration; + u64 tsf; + int ret; + int i; + + lockdep_assert_wiphy(wiphy); + + ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); + if (ret) { + rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); + return; + } + + rcu_read_lock(); + + bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); + + for (i = 0; i < ARRAY_SIZE(bss_conf->p2p_noa_attr.desc); i++) { + bool first = tsf_begin == U64_MAX; + u64 tmp; + + noa_desc = &bss_conf->p2p_noa_attr.desc[i]; + if (noa_desc->count == 0 || noa_desc->count == 255) + continue; + + start_time = le32_to_cpu(noa_desc->start_time); + interval = le32_to_cpu(noa_desc->interval); + duration = le32_to_cpu(noa_desc->duration); + + if (unlikely(duration == 0 || + (noa_desc->count > 1 && interval == 0))) + continue; + + tmp = start_time + interval * (noa_desc->count - 1) + duration; + tmp = (tsf & GENMASK_ULL(63, 32)) + tmp; + if (unlikely(tmp <= tsf)) + continue; + tsf_end = first ? tmp : max(tsf_end, tmp); + + tmp = (tsf & GENMASK_ULL(63, 32)) | start_time; + tsf_begin = first ? 
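/* remember the earliest NoA start time seen so far; the first valid descriptor seeds it */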
tmp : min(tsf_begin, tmp); + } + + rcu_read_unlock(); + + if (tsf_begin == U64_MAX) + return; + + rtw89_p2p_noa_once_cancel(rtwvif_link); + + if (noa_once->tsf_end > tsf) { + tsf_begin = min(tsf_begin, noa_once->tsf_begin); + tsf_end = max(tsf_end, noa_once->tsf_end); + } + + clr_delay_us = min_t(u64, tsf_end - tsf, UINT_MAX); + + if (tsf_begin <= tsf) { + noa_once->in_duration = true; + goto out; + } + + set_delay_us = tsf_begin - tsf; + if (unlikely(set_delay_us > UINT_MAX)) { + rtw89_warn(rtwdev, "%s: unhandled begin\n", __func__); + set_delay_us = 0; + clr_delay_us = 0; + rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); + noa_once->in_duration = false; + } + +out: + if (set_delay_us) + wiphy_delayed_work_queue(wiphy, &noa_once->set_work, + usecs_to_jiffies(set_delay_us)); + if (clr_delay_us) + wiphy_delayed_work_queue(wiphy, &noa_once->clr_work, + usecs_to_jiffies(clr_delay_us)); + + noa_once->tsf_begin = tsf_begin; + noa_once->tsf_end = tsf_end; +} diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h index 2b88f254a32d..b2c43d44820d 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.h +++ b/drivers/net/wireless/realtek/rtw89/ps.h @@ -22,6 +22,9 @@ void rtw89_p2p_noa_renew(struct rtw89_vif_link *rtwvif_link); void rtw89_p2p_noa_append(struct rtw89_vif_link *rtwvif_link, const struct ieee80211_p2p_noa_desc *desc); u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data); +void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link); +void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link); +void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link); static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev) { diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index c776954ad360..f05c81ae5869 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -7601,7 +7601,15 @@ #define B_BE_PWR_FORCE_RU_ON BIT(18) #define B_BE_PWR_FORCE_RU_ENON BIT(28) #define R_BE_PWR_FORCE_MACID 0x11A48 -#define B_BE_PWR_FORCE_MACID_ON BIT(9) +#define B_BE_PWR_FORCE_MACID_DBM_ON BIT(9) +#define B_BE_PWR_FORCE_MACID_DBM_VAL GENMASK(17, 10) +#define B_BE_PWR_FORCE_MACID_EN_VAL BIT(18) +#define B_BE_PWR_FORCE_MACID_EN_ON BIT(19) +#define B_BE_PWR_FORCE_MACID_ALL \ + (B_BE_PWR_FORCE_MACID_DBM_ON | \ + B_BE_PWR_FORCE_MACID_DBM_VAL | \ + B_BE_PWR_FORCE_MACID_EN_VAL | \ + B_BE_PWR_FORCE_MACID_EN_ON) #define R_BE_PWR_REG_CTRL 0x11A50 #define B_BE_PWR_BT_EN BIT(23) @@ -8737,8 +8745,10 @@ #define B_DPD_GDIS BIT(13) #define B_IQK_RFC_ON BIT(1) #define R_TXPWRB 0x56CC +#define R_P1_TXPWRB 0x76CC #define B_TXPWRB_ON BIT(28) #define B_TXPWRB_VAL GENMASK(27, 19) +#define B_TXPWRB_MAX GENMASK(8, 0) #define R_DPD_OFT_EN 0x5800 #define B_DPD_OFT_EN BIT(28) #define B_DPD_TSSI_CW GENMASK(26, 18) @@ -9360,6 +9370,9 @@ #define R_TSSI_PWR_P0 0xE610 #define R_TSSI_PWR_P1 0xE710 #define B_TSSI_CONT_EN BIT(3) +#define R_P0_TXPWRB_BE 0xE61C +#define R_P1_TXPWRB_BE 0xE71C +#define B_TXPWRB_MAX_BE GENMASK(20, 12) #define R_TSSI_MAP_OFST_P0 0xE620 #define R_TSSI_MAP_OFST_P1 0xE720 #define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 655323a79608..3ad14cab1f58 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -588,6 +588,38 @@ bottom: kfree(sband); } +#define RTW89_DEF_REGD_STR(regd) \ + [RTW89_ ## 
regd] = #regd + +static const char * const rtw89_regd_string[] = { + RTW89_DEF_REGD_STR(WW), + RTW89_DEF_REGD_STR(ETSI), + RTW89_DEF_REGD_STR(FCC), + RTW89_DEF_REGD_STR(MKK), + RTW89_DEF_REGD_STR(NA), + RTW89_DEF_REGD_STR(IC), + RTW89_DEF_REGD_STR(KCC), + RTW89_DEF_REGD_STR(ACMA), + RTW89_DEF_REGD_STR(NCC), + RTW89_DEF_REGD_STR(MEXICO), + RTW89_DEF_REGD_STR(CHILE), + RTW89_DEF_REGD_STR(UKRAINE), + RTW89_DEF_REGD_STR(CN), + RTW89_DEF_REGD_STR(QATAR), + RTW89_DEF_REGD_STR(UK), + RTW89_DEF_REGD_STR(THAILAND), +}; + +static_assert(ARRAY_SIZE(rtw89_regd_string) == RTW89_REGD_NUM); + +const char *rtw89_regd_get_string(enum rtw89_regulation_type regd) +{ + if (regd < 0 || regd >= RTW89_REGD_NUM) + return "(unknown)"; + + return rtw89_regd_string[regd]; +} + int rtw89_regd_setup(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; @@ -604,6 +636,7 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev) } regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; + regulatory->txpwr_uk_follow_etsi = true; if (!wiphy) return -EINVAL; @@ -726,11 +759,22 @@ static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev) struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_regd *regd = regulatory->regd; struct rtw89_tas_info *tas = &rtwdev->tas; + u8 tas_country; if (!tas->enable) return; - tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap); + if (memcmp("US", regd->alpha2, 2) == 0) + tas_country = RTW89_ACPI_CONF_TAS_US; + else if (memcmp("CA", regd->alpha2, 2) == 0) + tas_country = RTW89_ACPI_CONF_TAS_CA; + else if (memcmp("KR", regd->alpha2, 2) == 0) + tas_country = RTW89_ACPI_CONF_TAS_KR; + else + tas_country = RTW89_ACPI_CONF_TAS_OTHERS; + + tas->block_regd = !(tas->enabled_countries & tas_country && + test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap)); } static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 0d482cd57f6e..fafa200a9c8d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -2499,12 +2499,14 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .support_unii4 = true, .support_ant_gain = false, .support_tas = false, + .support_sar_by_ant = false, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, .hw_tkip_crypto = false, + .hw_mlo_bmc_crypto = false, .rf_path_num = 1, .tx_nss = 1, .rx_nss = 1, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 286334e26c84..cd5987fc52d7 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2217,12 +2217,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .support_unii4 = false, .support_ant_gain = false, .support_tas = false, + .support_sar_by_ant = false, .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, .hw_tkip_crypto = false, + .hw_mlo_bmc_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index eceb4fb9880d..dacdb384de2c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -853,12 +853,14 @@ const struct rtw89_chip_info 
rtw8852b_chip_info = { .support_unii4 = true, .support_ant_gain = true, .support_tas = false, + .support_sar_by_ant = true, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, .hw_tkip_crypto = false, + .hw_mlo_bmc_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c index 99c9505b3cbd..0cf03f18cbb1 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -8,6 +8,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852b_common.h" +#include "sar.h" #include "util.h" static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = { @@ -1234,6 +1235,7 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, u32_encode_bits(ref, B_DPD_REF); } +/* @pwr_ofst (unit: 1/8 dBm): power of path A minus power of path B */ static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, s16 pwr_ofst) { @@ -1336,6 +1338,27 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev, tx_shape_ofdm); } +static s16 rtw8852bx_get_txpwr_sar_diff(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) +{ + struct rtw89_sar_parm sar_parm = { + .center_freq = chan->freq, + .force_path = true, + }; + s16 sar_bb_a, sar_bb_b; + s8 sar_mac; + + sar_parm.path = RF_PATH_A; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_bb_a = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac); + + sar_parm.path = RF_PATH_B; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_bb_b = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac); + + return sar_bb_a - sar_bb_b; +} + static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) @@ -1343,6 +1366,7 @@ static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev, s16 pwr_ofst; pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan); + pwr_ofst += rtw8852bx_get_txpwr_sar_diff(rtwdev, chan); rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c index bbf37442c492..289dce688d72 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c @@ -786,12 +786,14 @@ const struct rtw89_chip_info rtw8852bt_chip_info = { .support_unii4 = true, .support_ant_gain = true, .support_tas = false, + .support_sar_by_ant = true, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, .hw_tkip_crypto = true, + .hw_mlo_bmc_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 08bcdf246382..2a6143a8d256 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -15,7 +15,7 @@ #include "sar.h" #include "util.h" -#define RTW8852C_FW_FORMAT_MAX 1 +#define RTW8852C_FW_FORMAT_MAX 2 #define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw" #define RTW8852C_MODULE_FIRMWARE \ RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin" @@ -2079,6 +2079,31 @@ static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev, rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst); } +static void rtw8852c_set_txpwr_sar_diff(struct rtw89_dev *rtwdev, + const struct 
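/* cap each RF path at its own SAR limit: query SAR with force_path and write the RF-unit value to the per-path TXPWRB max field (PHY 0 only) */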
rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_sar_parm sar_parm = { + .center_freq = chan->freq, + .force_path = true, + }; + s16 sar_rf; + s8 sar_mac; + + if (phy_idx != RTW89_PHY_0) + return; + + sar_parm.path = RF_PATH_A; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac); + rtw89_phy_write32_mask(rtwdev, R_TXPWRB, B_TXPWRB_MAX, sar_rf); + + sar_parm.path = RF_PATH_B; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB, B_TXPWRB_MAX, sar_rf); +} + static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) @@ -2089,6 +2114,7 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev, rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx); + rtw8852c_set_txpwr_sar_diff(rtwdev, chan, phy_idx); } static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev, @@ -3014,12 +3040,14 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .support_unii4 = true, .support_ant_gain = true, .support_tas = true, + .support_sar_by_ant = true, .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = true, .rx_freq_frome_ie = false, .hw_sec_hdr = true, .hw_mgmt_tx_encrypt = true, .hw_tkip_crypto = true, + .hw_mlo_bmc_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 8082592db84a..1d0f6e7df497 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -12,6 +12,7 @@ #include "reg.h" #include "rtw8922a.h" #include "rtw8922a_rfk.h" +#include "sar.h" #include "util.h" #define RTW8922A_FW_FORMAT_MAX 3 @@ -2070,7 +2071,8 @@ static void __rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev, rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5); rtw89_phy_rfk_dack_and_wait(rtwdev, phy_idx, chan, 58); - rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 32); + if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) + rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 128); } static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev) @@ -2233,6 +2235,31 @@ static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev, rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx); } +static void rtw8922a_set_txpwr_sar_diff(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_sar_parm sar_parm = { + .center_freq = chan->freq, + .force_path = true, + }; + s16 sar_rf; + s8 sar_mac; + + if (phy_idx != RTW89_PHY_0) + return; + + sar_parm.path = RF_PATH_A; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf); + + sar_parm.path = RF_PATH_B; + sar_mac = rtw89_query_sar(rtwdev, &sar_parm); + sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf); +} + static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_phy_idx phy_idx) @@ -2244,6 +2271,7 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev, rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx); rtw8922a_set_txpwr_ref(rtwdev, phy_idx); + 
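/* with support_sar_by_ant, per-path SAR caps are applied after the regular tx power programming */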
rtw8922a_set_txpwr_sar_diff(rtwdev, chan, phy_idx); } static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev, @@ -2823,12 +2851,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .support_unii4 = true, .support_ant_gain = true, .support_tas = false, + .support_sar_by_ant = true, .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, .rx_freq_frome_ie = false, .hw_sec_hdr = true, .hw_mgmt_tx_encrypt = true, .hw_tkip_crypto = true, + .hw_mlo_bmc_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index c4c93f836a2f..1659ea64ade1 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -77,11 +77,6 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, RR_CFGCH_BAND0 | RR_CFGCH_CH); rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH); - if (band == RTW89_BAND_2G) - rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0); - else - rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1); - switch (band) { case RTW89_BAND_2G: default: diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index 0b5af9528702..517b66022f18 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -57,10 +57,12 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev, } static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, - u32 center_freq, s32 *cfg) + const struct rtw89_sar_parm *sar_parm, + s32 *cfg) { struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common; enum rtw89_sar_subband subband_l, subband_h; + u32 center_freq = sar_parm->center_freq; const struct rtw89_6ghz_span *span; span = rtw89_get_6ghz_span(rtwdev, center_freq); @@ -90,6 +92,93 @@ static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, return 0; } +static const struct rtw89_sar_entry_from_acpi * +rtw89_sar_cfg_acpi_get_ent(const struct rtw89_sar_cfg_acpi *rtwsar, + enum rtw89_rf_path path, + enum rtw89_regulation_type regd) +{ + const struct rtw89_sar_indicator_from_acpi *ind = &rtwsar->indicator; + const struct rtw89_sar_table_from_acpi *tbl; + u8 sel; + + sel = ind->tblsel[path]; + tbl = &rtwsar->tables[sel]; + + return &tbl->entries[regd]; +} + +static +s32 rtw89_sar_cfg_acpi_get_min(const struct rtw89_sar_entry_from_acpi *ent, + enum rtw89_rf_path path, + enum rtw89_acpi_sar_subband subband_low, + enum rtw89_acpi_sar_subband subband_high) +{ + return min(ent->v[subband_low][path], ent->v[subband_high][path]); +} + +static int rtw89_query_sar_config_acpi(struct rtw89_dev *rtwdev, + const struct rtw89_sar_parm *sar_parm, + s32 *cfg) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_sar_cfg_acpi *rtwsar = &rtwdev->sar.cfg_acpi; + const struct rtw89_sar_entry_from_acpi *ent_a, *ent_b; + enum rtw89_acpi_sar_subband subband_l, subband_h; + u32 center_freq = sar_parm->center_freq; + const struct rtw89_6ghz_span *span; + enum rtw89_regulation_type regd; + enum rtw89_band band; + s32 cfg_a, cfg_b; + + span = rtw89_get_6ghz_span(rtwdev, center_freq); + + if (span && RTW89_ACPI_SAR_SPAN_VALID(span)) { + subband_l = span->acpi_sar_subband_low; + subband_h = span->acpi_sar_subband_high; + } else { + subband_l = rtw89_acpi_sar_get_subband(rtwdev, center_freq); + subband_h = subband_l; + } + + band = rtw89_acpi_sar_subband_to_band(rtwdev, subband_l); + regd = 
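/* ACPI SAR entries are selected per RF path and indexed by regulatory domain */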
rtw89_regd_get(rtwdev, band); + + ent_a = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_A, regd); + ent_b = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_B, regd); + + cfg_a = rtw89_sar_cfg_acpi_get_min(ent_a, RF_PATH_A, subband_l, subband_h); + cfg_b = rtw89_sar_cfg_acpi_get_min(ent_b, RF_PATH_B, subband_l, subband_h); + + if (chip->support_sar_by_ant) { + /* With declaration of support_sar_by_ant, relax the general + * SAR querying to return the maximum between paths. However, + * expect chip has dealt with the corresponding SAR settings + * by path. (To get SAR for a given path, chip can then query + * with force_path.) + */ + if (sar_parm->force_path) { + switch (sar_parm->path) { + default: + case RF_PATH_A: + *cfg = cfg_a; + break; + case RF_PATH_B: + *cfg = cfg_b; + break; + } + } else { + *cfg = max(cfg_a, cfg_b); + } + } else { + *cfg = min(cfg_a, cfg_b); + } + + if (sar_parm->ntx == RTW89_2TX) + *cfg -= rtwsar->downgrade_2tx; + + return 0; +} + static const struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = { [RTW89_SAR_SOURCE_COMMON] = { @@ -97,6 +186,11 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = { .txpwr_factor_sar = 2, .query_sar_config = rtw89_query_sar_config_common, }, + [RTW89_SAR_SOURCE_ACPI] = { + .descr_sar_source = "RTW89_SAR_SOURCE_ACPI", + .txpwr_factor_sar = TXPWR_FACTOR_OF_RTW89_ACPI_SAR, + .query_sar_config = rtw89_query_sar_config_acpi, + }, }; #define rtw89_sar_set_src(_dev, _src, _cfg_name, _cfg_data) \ @@ -175,7 +269,7 @@ static const char *rtw89_tas_state_str(enum rtw89_tas_state state) } } -s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ @@ -191,7 +285,7 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) if (src == RTW89_SAR_SOURCE_NONE) return RTW89_SAR_TXPWR_MAC_MAX; - ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg); if (ret) return RTW89_SAR_TXPWR_MAC_MAX; @@ -215,9 +309,10 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg); } +EXPORT_SYMBOL(rtw89_query_sar); int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, - u32 center_freq) + const struct rtw89_sar_parm *sar_parm) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ @@ -238,7 +333,7 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, p += scnprintf(p, end - p, "source: %d (%s)\n", src, sar_hdl->descr_sar_source); - ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg); if (ret) { p += scnprintf(p, end - p, "config: return code: %d\n", ret); p += scnprintf(p, end - p, @@ -252,6 +347,8 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct)); + p += scnprintf(p, end - p, "support different configs by antenna: %s\n", + str_yes_no(rtwdev->chip->support_sar_by_ant)); out: return p - buf; } @@ -286,16 +383,7 @@ out: static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev, const struct rtw89_sar_cfg_common *sar) { - enum rtw89_sar_sources src; - - lockdep_assert_wiphy(rtwdev->hw->wiphy); - - src = rtwdev->sar.src; - if (src != RTW89_SAR_SOURCE_NONE && src != 
RTW89_SAR_SOURCE_COMMON) { - rtw89_warn(rtwdev, "SAR source: %d is in use", src); - return -EBUSY; - } - + /* let common SAR have the highest priority; always apply it */ rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar); rtw89_core_set_chip_txpwr(rtwdev); rtw89_tas_reset(rtwdev, false); @@ -363,18 +451,92 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, return rtw89_apply_sar_common(rtwdev, &sar_common); } +static void rtw89_apply_sar_acpi(struct rtw89_dev *rtwdev, + const struct rtw89_sar_cfg_acpi *sar) +{ + const struct rtw89_sar_table_from_acpi *tbl; + const struct rtw89_sar_entry_from_acpi *ent; + enum rtw89_sar_sources src; + unsigned int i, j, k; + + src = rtwdev->sar.src; + if (src != RTW89_SAR_SOURCE_NONE) { + rtw89_warn(rtwdev, "SAR source: %d is in use", src); + return; + } + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "SAR-ACPI downgrade 2TX: %u (unit: 1/%lu dBm)\n", + sar->downgrade_2tx, BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR)); + + for (i = 0; i < sar->valid_num; i++) { + tbl = &sar->tables[i]; + + for (j = 0; j < RTW89_REGD_NUM; j++) { + ent = &tbl->entries[j]; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "SAR-ACPI-[%u] REGD-%s (unit: 1/%lu dBm)\n", + i, rtw89_regd_get_string(j), + BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR)); + + for (k = 0; k < NUM_OF_RTW89_ACPI_SAR_SUBBAND; k++) + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "On subband %u, { %d, %d }\n", k, + ent->v[k][RF_PATH_A], ent->v[k][RF_PATH_B]); + } + } + + rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_ACPI, cfg_acpi, sar); + + /* SAR via ACPI is only configured in the early initial phase, so + * it does not seem necessary to reset txpwr related things here. + */ +} + +static void rtw89_set_sar_from_acpi(struct rtw89_dev *rtwdev) +{ + struct rtw89_sar_cfg_acpi *cfg; + int ret; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return; + + ret = rtw89_acpi_evaluate_sar(rtwdev, cfg); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "evaluating ACPI SAR returns %d\n", ret); + goto out; + } + + if (unlikely(!cfg->valid_num)) { + rtw89_debug(rtwdev, RTW89_DBG_SAR, "no valid SAR table from ACPI\n"); + goto out; + } + + rtw89_apply_sar_acpi(rtwdev, cfg); + +out: + kfree(cfg); +} + static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg) { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; + struct rtw89_sar_parm sar_parm = {}; int ret; if (src == RTW89_SAR_SOURCE_NONE) return false; - ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg); + sar_parm.center_freq = chan->freq; + ret = sar_hdl->query_sar_config(rtwdev, &sar_parm, cfg); if (ret) return false; @@ -383,18 +545,27 @@ static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg) return true; } -static void rtw89_tas_state_update(struct rtw89_dev *rtwdev, - enum rtw89_tas_state state) +static bool __rtw89_tas_state_update(struct rtw89_dev *rtwdev, + enum rtw89_tas_state state) { struct rtw89_tas_info *tas = &rtwdev->tas; if (tas->state == state) - return; + return false; rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n", rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state)); tas->state = state; + return true; +} + +static void rtw89_tas_state_update(struct rtw89_dev *rtwdev, + enum rtw89_tas_state state) +{ + if (!__rtw89_tas_state_update(rtwdev, state)) + return; 
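/* reprogram chip tx power only when the TAS state actually changed */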
+ rtw89_core_set_chip_txpwr(rtwdev); } @@ -489,7 +660,7 @@ static void rtw89_tas_history_update(struct rtw89_dev *rtwdev) rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT))); } -static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev) +static bool rtw89_tas_rolling_average(struct rtw89_dev *rtwdev) { struct rtw89_tas_info *tas = &rtwdev->tas; s32 dpr_on_threshold, dpr_off_threshold; @@ -515,18 +686,18 @@ static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev) else if (txpwr_avg < dpr_off_threshold) state = RTW89_TAS_STATE_DPR_OFF; else - return; + return false; - rtw89_tas_state_update(rtwdev, state); + return __rtw89_tas_state_update(rtwdev, state); } -void rtw89_tas_init(struct rtw89_dev *rtwdev) +static void rtw89_tas_init(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tas_info *tas = &rtwdev->tas; + const struct rtw89_acpi_policy_tas *ptr; struct rtw89_acpi_dsm_result res = {}; int ret; - u8 val; if (!chip->support_tas) return; @@ -538,8 +709,9 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev) return; } - val = res.u.value; - switch (val) { + ptr = res.u.policy_tas; + + switch (ptr->enable) { case 0: tas->enable = false; break; @@ -552,8 +724,13 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev) if (!tas->enable) { rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n"); - return; + goto out; } + + tas->enabled_countries = ptr->enabled_countries; + +out: + kfree(ptr); } void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force) @@ -598,29 +775,28 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force) "tas: band: %u, freq: %u\n", chan->band_type, chan->freq); } -void rtw89_tas_track(struct rtw89_dev *rtwdev) +static bool rtw89_tas_track(struct rtw89_dev *rtwdev) { struct rtw89_tas_info *tas = &rtwdev->tas; struct rtw89_hal *hal = &rtwdev->hal; s32 cfg; if (hal->disabled_dm_bitmap & BIT(RTW89_DM_TAS)) - return; + return false; if (!rtw89_tas_is_active(rtwdev)) - return; + return false; - if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) { - rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR); - return; - } + if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) + return __rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR); if (tas->pause) - return; + return false; rtw89_tas_window_update(rtwdev); rtw89_tas_history_update(rtwdev); - rtw89_tas_rolling_average(rtwdev); + + return rtw89_tas_rolling_average(rtwdev); } void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start) @@ -667,3 +843,51 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev, } } EXPORT_SYMBOL(rtw89_tas_chanctx_cb); + +void rtw89_sar_init(struct rtw89_dev *rtwdev) +{ + rtw89_set_sar_from_acpi(rtwdev); + rtw89_tas_init(rtwdev); +} + +static bool rtw89_sar_track_acpi(struct rtw89_dev *rtwdev) +{ + struct rtw89_sar_cfg_acpi *cfg = &rtwdev->sar.cfg_acpi; + struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator; + const enum rtw89_sar_sources src = rtwdev->sar.src; + bool changed; + int ret; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + if (src != RTW89_SAR_SOURCE_ACPI) + return false; + + if (!ind->enable_sync) + return false; + + ret = rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, &changed); + if (likely(!ret)) + return changed; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "%s: failed to track indicator: %d; reset and disable\n", + __func__, ret); + + memset(ind->tblsel, 0, sizeof(ind->tblsel)); + ind->enable_sync = false; + return true; +} + +void rtw89_sar_track(struct rtw89_dev *rtwdev) +{ + unsigned int 
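/* collect change indications from ACPI SAR tracking and TAS; tx power is reprogrammed once below only if either reports a change */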
changes = 0; + + changes += rtw89_sar_track_acpi(rtwdev); + changes += rtw89_tas_track(rtwdev); + + if (!changes) + return; + + rtw89_core_set_chip_txpwr(rtwdev); +} diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h index 0df1661db9a8..4b7f3d44f57b 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.h +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -10,25 +10,34 @@ #define RTW89_SAR_TXPWR_MAC_MAX 63 #define RTW89_SAR_TXPWR_MAC_MIN -64 +struct rtw89_sar_parm { + u32 center_freq; + enum rtw89_ntx ntx; + + bool force_path; + enum rtw89_rf_path path; +}; + struct rtw89_sar_handler { const char *descr_sar_source; u8 txpwr_factor_sar; - int (*query_sar_config)(struct rtw89_dev *rtwdev, u32 center_freq, s32 *cfg); + int (*query_sar_config)(struct rtw89_dev *rtwdev, + const struct rtw89_sar_parm *sar_parm, s32 *cfg); }; extern const struct cfg80211_sar_capa rtw89_sar_capa; -s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq); +s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm); int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, - u32 center_freq); + const struct rtw89_sar_parm *sar_parm); int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz); int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); -void rtw89_tas_init(struct rtw89_dev *rtwdev); void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force); -void rtw89_tas_track(struct rtw89_dev *rtwdev); void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start); void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state); +void rtw89_sar_init(struct rtw89_dev *rtwdev); +void rtw89_sar_track(struct rtw89_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 0740e303680c..811c91481441 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -309,6 +309,9 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif_link->port); rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK; rtwvif_link->trigger = false; + rtwvif_link->rand_tsf_done = false; + + rtw89_p2p_noa_once_deinit(rtwvif_link); } } diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 70fe7cebc9d5..94f27a9ee9f7 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -712,6 +712,25 @@ static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid) } } +static inline u8 +rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) +{ + struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; + struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link; + + if (desc_info->hiq) { + if (rtwvif_link->mac_idx == RTW89_MAC_1) + return RTW89_TX_QSEL_B1_HI; + else + return RTW89_TX_QSEL_B0_HI; + } + + if (rtwvif_link->mac_idx == RTW89_MAC_1) + return RTW89_TX_QSEL_B1_MGMT; + else + return RTW89_TX_QSEL_B0_MGMT; +} + static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) { switch (qsel) { @@ -719,12 +738,24 @@ static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel); fallthrough; case RTW89_TX_QSEL_BE_0: + case RTW89_TX_QSEL_BE_1: + case RTW89_TX_QSEL_BE_2: + case RTW89_TX_QSEL_BE_3: return RTW89_TXCH_ACH0; case 
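/* the per-band QSEL variants added above map onto the same four AC DMA channels */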
RTW89_TX_QSEL_BK_0: + case RTW89_TX_QSEL_BK_1: + case RTW89_TX_QSEL_BK_2: + case RTW89_TX_QSEL_BK_3: return RTW89_TXCH_ACH1; case RTW89_TX_QSEL_VI_0: + case RTW89_TX_QSEL_VI_1: + case RTW89_TX_QSEL_VI_2: + case RTW89_TX_QSEL_VI_3: return RTW89_TXCH_ACH2; case RTW89_TX_QSEL_VO_0: + case RTW89_TX_QSEL_VO_1: + case RTW89_TX_QSEL_VO_2: + case RTW89_TX_QSEL_VO_3: return RTW89_TXCH_ACH3; case RTW89_TX_QSEL_B0_MGMT: return RTW89_TXCH_CH8; diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index 17eee58503cb..34a0ab49bd7a 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -1086,8 +1086,7 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev, rtw89_wow_init_pno(rtwdev, wowlan->nd_config); rtw89_for_each_rtwvif(rtwdev, rtwvif) { - /* use the link on HW-0 to do wow flow */ - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_get_designated_link(rtwvif); if (!rtwvif_link) continue; diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index aec356880adf..af0fa8bd970b 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index c20fdbac51c5..781cdbcac104 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h index 3a82ce5837fb..19f479aa6920 100644 --- a/drivers/nfc/s3fwrn5/firmware.h +++ b/drivers/nfc/s3fwrn5/firmware.h @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index 536c566e3f59..110d086cfe5b 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -2,7 +2,7 @@ /* * I2C Link Layer for Samsung S3FWRN5 NCI based Driver * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c index ca6828f55ba0..5a9de11bbece 100644 --- a/drivers/nfc/s3fwrn5/nci.c +++ b/drivers/nfc/s3fwrn5/nci.c @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h index c2d906591e9e..bc4bce2bbc4d 100644 --- a/drivers/nfc/s3fwrn5/nci.h +++ b/drivers/nfc/s3fwrn5/nci.h @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/phy_common.c b/drivers/nfc/s3fwrn5/phy_common.c index 81318478d5fd..deb2c039f0fd 100644 --- a/drivers/nfc/s3fwrn5/phy_common.c +++ b/drivers/nfc/s3fwrn5/phy_common.c @@ -2,9 +2,9 @@ /* * Link Layer for Samsung S3FWRN5 NCI based Driver * - * 
Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> - * Copyright (C) 2020 Samsung Electrnoics + * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <bongsu.jeon@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/phy_common.h b/drivers/nfc/s3fwrn5/phy_common.h index 99749c9294d1..9cef25436bf9 100644 --- a/drivers/nfc/s3fwrn5/phy_common.h +++ b/drivers/nfc/s3fwrn5/phy_common.h @@ -2,9 +2,9 @@ * * Link Layer for Samsung S3FWRN5 NCI based Driver * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> - * Copyright (C) 2020 Samsung Electrnoics + * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <bongsu.jeon@samsung.com> */ diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h index bb8f936d13a2..2b492236090b 100644 --- a/drivers/nfc/s3fwrn5/s3fwrn5.h +++ b/drivers/nfc/s3fwrn5/s3fwrn5.h @@ -2,7 +2,7 @@ /* * NCI based driver for Samsung S3FWRN5 NFC chip * - * Copyright (C) 2015 Samsung Electrnoics + * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <r.baldyga@samsung.com> */ diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c index 6b89d596ba9a..9ef8ef2d4363 100644 --- a/drivers/nfc/virtual_ncidev.c +++ b/drivers/nfc/virtual_ncidev.c @@ -2,7 +2,7 @@ /* * Virtual NCI device simulation driver * - * Copyright (C) 2020 Samsung Electrnoics + * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <bongsu.jeon@samsung.com> */ diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 4d64b6935bb9..7dca58f0a237 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -84,9 +84,9 @@ config NVME_TCP tristate "NVM Express over Fabrics TCP host driver" depends on INET depends on BLOCK + select CRC32 + select NET_CRC32C select NVME_FABRICS - select CRYPTO - select CRYPTO_CRC32C help This provides support for the NVMe over Fabrics protocol using the TCP transport. This allows you to use remote block devices diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ac53629fce68..6b04473c0ab7 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2059,7 +2059,21 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; else - atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + atomic_bs = (1 + ns->ctrl->awupf) * bs; + + /* + * Set subsystem atomic bs. + */ + if (ns->ctrl->subsys->atomic_bs) { + if (atomic_bs != ns->ctrl->subsys->atomic_bs) { + dev_err_ratelimited(ns->ctrl->device, + "%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n", + ns->disk ? ns->disk->disk_name : "?", + ns->ctrl->subsys->atomic_bs, + atomic_bs); + } + } else + ns->ctrl->subsys->atomic_bs = atomic_bs; nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); } @@ -2201,6 +2215,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, nvme_set_chunk_sectors(ns, id, &lim); if (!nvme_update_disk_info(ns, id, &lim)) capacity = 0; + + /* + * Validate the max atomic write size fits within the subsystem's + * atomic write capabilities. 
+ */ + if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) { + blk_mq_unfreeze_queue(ns->disk->queue, memflags); + ret = -ENXIO; + goto out; + } + nvme_config_discard(ns, &lim); if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && ns->head->ids.csi == NVME_CSI_ZNS) @@ -3031,7 +3056,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) kfree(subsys); return -EINVAL; } - subsys->awupf = le16_to_cpu(id->awupf); nvme_mpath_default_iopolicy(subsys); subsys->dev.class = &nvme_subsys_class; @@ -3441,7 +3465,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); - + ctrl->awupf = le16_to_cpu(id->awupf); out_free: kfree(id); return ret; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 250f3da67cc9..cf0ef4745564 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -638,7 +638,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_set_stacking_limits(&lim); lim.dma_alignment = 3; - lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | + BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES; if (head->ids.csi == NVME_CSI_ZNS) lim.features |= BLK_FEAT_ZONED; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 51e078642127..8fc4683418a3 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -410,6 +410,7 @@ struct nvme_ctrl { enum nvme_ctrl_type cntrltype; enum nvme_dctype dctype; + u16 awupf; /* 0's based value. */ }; static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) @@ -442,11 +443,11 @@ struct nvme_subsystem { u8 cmic; enum nvme_subsys_type subtype; u16 vendor_id; - u16 awupf; /* 0's based awupf value. */ struct ida ns_ida; #ifdef CONFIG_NVME_MULTIPATH enum nvme_iopolicy iopolicy; #endif + u32 atomic_bs; }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 2e30e9be7408..f1dd804151b1 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -390,7 +390,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, * as it only leads to a small amount of wasted memory for the lifetime of * the I/O. 
*/ -static int nvme_pci_npages_prp(void) +static __always_inline int nvme_pci_npages_prp(void) { unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); @@ -1202,7 +1202,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + spin_lock(&nvmeq->cq_poll_lock); nvme_poll_cq(nvmeq, NULL); + spin_unlock(&nvmeq->cq_poll_lock); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -3737,6 +3739,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index aba365f97cf6..8ae6cc2280ca 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/crc32.h> #include <linux/nvme-tcp.h> #include <linux/nvme-keyring.h> #include <net/sock.h> @@ -16,7 +17,6 @@ #include <net/tls_prot.h> #include <net/handshake.h> #include <linux/blk-mq.h> -#include <crypto/hash.h> #include <net/busy_poll.h> #include <trace/events/sock.h> @@ -168,8 +168,8 @@ struct nvme_tcp_queue { bool hdr_digest; bool data_digest; bool tls_enabled; - struct ahash_request *rcv_hash; - struct ahash_request *snd_hash; + u32 rcv_crc; + u32 snd_crc; __le32 exp_ddgst; __le32 recv_ddgst; struct completion tls_complete; @@ -456,32 +456,38 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) return req; } -static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, - __le32 *dgst) +#define NVME_TCP_CRC_SEED (~0) + +static inline void nvme_tcp_ddgst_update(u32 *crcp, + struct page *page, size_t off, size_t len) { - ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0); - crypto_ahash_final(hash); + page += off / PAGE_SIZE; + off %= PAGE_SIZE; + while (len) { + const void *vaddr = kmap_local_page(page); + size_t n = min(len, (size_t)PAGE_SIZE - off); + + *crcp = crc32c(*crcp, vaddr + off, n); + kunmap_local(vaddr); + page++; + off = 0; + len -= n; + } } -static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, - struct page *page, off_t off, size_t len) +static inline __le32 nvme_tcp_ddgst_final(u32 crc) { - struct scatterlist sg; - - sg_init_table(&sg, 1); - sg_set_page(&sg, page, len, off); - ahash_request_set_crypt(hash, &sg, NULL, len); - crypto_ahash_update(hash); + return cpu_to_le32(~crc); } -static inline void nvme_tcp_hdgst(struct ahash_request *hash, - void *pdu, size_t len) +static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len) { - struct scatterlist sg; + return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len)); +} - sg_init_one(&sg, pdu, len); - ahash_request_set_crypt(hash, &sg, pdu + len, len); - crypto_ahash_digest(hash); +static inline void nvme_tcp_set_hdgst(void *pdu, size_t len) +{ + *(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len); } static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, @@ -499,8 +505,7 @@ static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, } recv_digest = *(__le32 *)(pdu + hdr->hlen); - 
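/* header and data digests are now computed with the crc32c() library helper, so the crypto_ahash transforms can be dropped */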
nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); - exp_digest = *(__le32 *)(pdu + hdr->hlen); + exp_digest = nvme_tcp_hdgst(pdu, pdu_len); if (recv_digest != exp_digest) { dev_err(queue->ctrl->ctrl.device, "header digest error: recv %#x expected %#x\n", @@ -526,7 +531,7 @@ static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) nvme_tcp_queue_id(queue)); return -EPROTO; } - crypto_ahash_init(queue->rcv_hash); + queue->rcv_crc = NVME_TCP_CRC_SEED; return 0; } @@ -926,8 +931,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, iov_iter_count(&req->iter)); if (queue->data_digest) - ret = skb_copy_and_hash_datagram_iter(skb, *offset, - &req->iter, recv_len, queue->rcv_hash); + ret = skb_copy_and_crc32c_datagram_iter(skb, *offset, + &req->iter, recv_len, &queue->rcv_crc); else ret = skb_copy_datagram_iter(skb, *offset, &req->iter, recv_len); @@ -945,7 +950,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, if (!queue->data_remaining) { if (queue->data_digest) { - nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); + queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc); queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; } else { if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { @@ -1147,7 +1152,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) return ret; if (queue->data_digest) - nvme_tcp_ddgst_update(queue->snd_hash, page, + nvme_tcp_ddgst_update(&queue->snd_crc, page, offset, ret); /* @@ -1161,8 +1166,8 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) /* fully successful last send in current PDU */ if (last && ret == len) { if (queue->data_digest) { - nvme_tcp_ddgst_final(queue->snd_hash, - &req->ddgst); + req->ddgst = + nvme_tcp_ddgst_final(queue->snd_crc); req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { @@ -1194,7 +1199,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) msg.msg_flags |= MSG_EOR; if (queue->hdr_digest && !req->offset) - nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvme_tcp_set_hdgst(pdu, sizeof(*pdu)); bvec_set_virt(&bvec, (void *)pdu + req->offset, len); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); @@ -1207,7 +1212,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) if (inline_data) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) - crypto_ahash_init(queue->snd_hash); + queue->snd_crc = NVME_TCP_CRC_SEED; } else { nvme_tcp_done_send_req(queue); } @@ -1229,7 +1234,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) int ret; if (queue->hdr_digest && !req->offset) - nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvme_tcp_set_hdgst(pdu, sizeof(*pdu)); if (!req->h2cdata_left) msg.msg_flags |= MSG_SPLICE_PAGES; @@ -1244,7 +1249,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) if (!len) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) - crypto_ahash_init(queue->snd_hash); + queue->snd_crc = NVME_TCP_CRC_SEED; return 1; } req->offset += ret; @@ -1384,41 +1389,6 @@ static void nvme_tcp_io_work(struct work_struct *w) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } -static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); - - ahash_request_free(queue->rcv_hash); - ahash_request_free(queue->snd_hash); - crypto_free_ahash(tfm); -} - -static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) -{ - struct crypto_ahash *tfm; - - 
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->snd_hash) - goto free_tfm; - ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); - - queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->rcv_hash) - goto free_snd_hash; - ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); - - return 0; -free_snd_hash: - ahash_request_free(queue->snd_hash); -free_tfm: - crypto_free_ahash(tfm); - return -ENOMEM; -} - static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) { struct nvme_tcp_request *async = &ctrl->async_req; @@ -1451,9 +1421,6 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; - if (queue->hdr_digest || queue->data_digest) - nvme_tcp_free_crypto(queue); - page_frag_cache_drain(&queue->pf_cache); noreclaim_flag = memalloc_noreclaim_save(); @@ -1867,21 +1834,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, queue->hdr_digest = nctrl->opts->hdr_digest; queue->data_digest = nctrl->opts->data_digest; - if (queue->hdr_digest || queue->data_digest) { - ret = nvme_tcp_alloc_crypto(queue); - if (ret) { - dev_err(nctrl->device, - "failed to allocate queue %d crypto\n", qid); - goto err_sock; - } - } rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + nvme_tcp_hdgst_len(queue); queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); if (!queue->pdu) { ret = -ENOMEM; - goto err_crypto; + goto err_sock; } dev_dbg(nctrl->device, "connecting queue %d\n", @@ -1914,9 +1873,6 @@ err_init_connect: kernel_sock_shutdown(queue->sock, SHUT_RDWR); err_rcv_pdu: kfree(queue->pdu); -err_crypto: - if (queue->hdr_digest || queue->data_digest) - nvme_tcp_free_crypto(queue); err_sock: /* ->sock will be released by fput() */ fput(queue->sock->file); diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c index 7fab7f3d79b7..7123c855b5a6 100644 --- a/drivers/nvme/target/pci-epf.c +++ b/drivers/nvme/target/pci-epf.c @@ -62,8 +62,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) enum nvmet_pci_epf_queue_flags { - NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */ - NVMET_PCI_EPF_Q_LIVE, /* The queue is live */ + NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */ NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ }; @@ -596,9 +595,6 @@ static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_irq_vector *iv = cq->iv; bool ret; - if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) - return false; - /* IRQ coalescing for the admin queue is not allowed. */ if (!cq->qid) return true; @@ -625,7 +621,8 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, struct pci_epf *epf = nvme_epf->epf; int ret = 0; - if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) || + !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) return; mutex_lock(&ctrl->irq_lock); @@ -636,14 +633,16 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, switch (nvme_epf->irq_type) { case PCI_IRQ_MSIX: case PCI_IRQ_MSI: + /* + * If we fail to raise an MSI or MSI-X interrupt, it is likely + * because the host is using legacy INTX IRQs (e.g. BIOS, + * grub), but we can fallback to the INTX type only if the + * endpoint controller supports this type. 
+ */ ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, nvme_epf->irq_type, cq->vector + 1); - if (!ret) + if (!ret || !nvme_epf->epc_features->intx_capable) break; - /* - * If we got an error, it is likely because the host is using - * legacy IRQs (e.g. BIOS, grub). - */ fallthrough; case PCI_IRQ_INTX: ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, @@ -656,7 +655,9 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, } if (ret) - dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret); + dev_err_ratelimited(ctrl->dev, + "CQ[%u]: Failed to raise IRQ (err=%d)\n", + cq->qid, ret); unlock: mutex_unlock(&ctrl->irq_lock); @@ -1319,8 +1320,14 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); - dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", - cqid, qsize, cq->qes, cq->vector); + if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", + cqid, qsize, cq->qes, cq->vector); + else + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ disabled\n", + cqid, qsize, cq->qes); return NVME_SC_SUCCESS; @@ -1344,7 +1351,8 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) cancel_delayed_work_sync(&cq->work); nvmet_pci_epf_drain_queue(cq); - nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); + if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); return NVME_SC_SUCCESS; @@ -1533,7 +1541,6 @@ static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, if (sq) { queue = &ctrl->sq[qid]; - set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags); } else { queue = &ctrl->cq[qid]; INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c index 2bec70615449..f59caff4b3d4 100644 --- a/drivers/phy/phy-can-transceiver.c +++ b/drivers/phy/phy-can-transceiver.c @@ -93,6 +93,16 @@ static const struct of_device_id can_transceiver_phy_ids[] = { }; MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids); +/* Temporary wrapper until the multiplexer subsystem supports optional muxes */ +static inline struct mux_state * +devm_mux_state_get_optional(struct device *dev, const char *mux_name) +{ + if (!of_property_present(dev->of_node, "mux-states")) + return NULL; + + return devm_mux_state_get(dev, mux_name); +} + static int can_transceiver_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; @@ -114,13 +124,11 @@ static int can_transceiver_phy_probe(struct platform_device *pdev) match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node); drvdata = match->data; - mux_state = devm_mux_state_get(dev, NULL); - if (IS_ERR(mux_state)) { - if (PTR_ERR(mux_state) == -EPROBE_DEFER) - return PTR_ERR(mux_state); - } else { - can_transceiver_phy->mux_state = mux_state; - } + mux_state = devm_mux_state_get_optional(dev, NULL); + if (IS_ERR(mux_state)) + return PTR_ERR(mux_state); + + can_transceiver_phy->mux_state = mux_state; phy = devm_phy_create(dev, dev->of_node, &can_transceiver_phy_ops); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c index 45b3b792696e..b33e2e2b5014 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c @@ -1754,7 +1754,8 @@ static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct 
qmp_phy_cfg qmp_ufs_init_all(qmp, &cfg->tbls_hs_overlay[i]); } - qmp_ufs_init_all(qmp, &cfg->tbls_hs_b); + if (qmp->mode == PHY_MODE_UFS_HS_B) + qmp_ufs_init_all(qmp, &cfg->tbls_hs_b); } static int qmp_ufs_com_init(struct qmp_ufs *qmp) diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 775f4f973a6c..9fdf17e0848a 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -9,6 +9,7 @@ * Copyright (C) 2014 Cogent Embedded, Inc. */ +#include <linux/cleanup.h> #include <linux/extcon-provider.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -107,7 +108,6 @@ struct rcar_gen3_phy { struct rcar_gen3_chan *ch; u32 int_enable_bits; bool initialized; - bool otg_initialized; bool powered; }; @@ -119,9 +119,8 @@ struct rcar_gen3_chan { struct regulator *vbus; struct reset_control *rstc; struct work_struct work; - struct mutex lock; /* protects rphys[...].powered */ + spinlock_t lock; /* protects access to hardware and driver data structure. */ enum usb_dr_mode dr_mode; - int irq; u32 obint_enable_bits; bool extcon_host; bool is_otg_channel; @@ -320,16 +319,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch) return false; } -static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch) +static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch) { - int i; - - for (i = 0; i < NUM_OF_PHYS; i++) { - if (ch->rphys[i].otg_initialized) - return false; + for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; + i++) { + if (ch->rphys[i].initialized) + return true; } - return true; + return false; } static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch) @@ -351,7 +349,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, bool is_b_device; enum phy_mode cur_mode, new_mode; - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + guard(spinlock_irqsave)(&ch->lock); + + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; if (sysfs_streq(buf, "host")) @@ -389,7 +389,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr, { struct rcar_gen3_chan *ch = dev_get_drvdata(dev); - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? 
"host" : @@ -402,6 +402,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) void __iomem *usb2_base = ch->base; u32 val; + if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch)) + return; + /* Should not use functions of read-modify-write a register */ val = readl(usb2_base + USB2_LINECTRL1); val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN | @@ -415,7 +418,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) val = readl(usb2_base + USB2_ADPCTRL); writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); } - msleep(20); + mdelay(20); writel(0xffffffff, usb2_base + USB2_OBINTSTA); writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); @@ -427,16 +430,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) { struct rcar_gen3_chan *ch = _ch; void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); + struct device *dev = ch->dev; irqreturn_t ret = IRQ_NONE; + u32 status; + + pm_runtime_get_noresume(dev); + + if (pm_runtime_suspended(dev)) + goto rpm_put; - if (status & ch->obint_enable_bits) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; + scoped_guard(spinlock, &ch->lock) { + status = readl(usb2_base + USB2_OBINTSTA); + if (status & ch->obint_enable_bits) { + dev_vdbg(dev, "%s: %08x\n", __func__, status); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } } +rpm_put: + pm_runtime_put_noidle(dev); return ret; } @@ -446,32 +460,23 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; - int ret; - if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(channel->dev), channel); - if (ret < 0) { - dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); - return ret; - } - } + guard(spinlock_irqsave)(&channel->lock); /* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits; writel(val, usb2_base + USB2_INT_ENABLE); - writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); - writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); - - /* Initialize otg part */ - if (channel->is_otg_channel) { - if (rcar_gen3_needs_init_otg(channel)) - rcar_gen3_init_otg(channel); - rphy->otg_initialized = true; + + if (!rcar_gen3_is_any_rphy_initialized(channel)) { + writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); + writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); } + /* Initialize otg part (only if we initialize a PHY with IRQs). 
*/ + if (rphy->int_enable_bits) + rcar_gen3_init_otg(channel); + rphy->initialized = true; return 0; @@ -484,10 +489,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) void __iomem *usb2_base = channel->base; u32 val; - rphy->initialized = false; + guard(spinlock_irqsave)(&channel->lock); - if (channel->is_otg_channel) - rphy->otg_initialized = false; + rphy->initialized = false; val = readl(usb2_base + USB2_INT_ENABLE); val &= ~rphy->int_enable_bits; @@ -495,9 +499,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE); - if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) - free_irq(channel->irq, channel); - return 0; } @@ -509,16 +510,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) u32 val; int ret = 0; - mutex_lock(&channel->lock); - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; - if (channel->vbus) { ret = regulator_enable(channel->vbus); if (ret) - goto out; + return ret; } + guard(spinlock_irqsave)(&channel->lock); + + if (!rcar_gen3_are_all_rphys_power_off(channel)) + goto out; + val = readl(usb2_base + USB2_USBCTR); val |= USB2_USBCTR_PLL_RST; writel(val, usb2_base + USB2_USBCTR); @@ -528,7 +530,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) out: /* The powered flag should be set for any other phys anyway */ rphy->powered = true; - mutex_unlock(&channel->lock); return 0; } @@ -539,18 +540,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; int ret = 0; - mutex_lock(&channel->lock); - rphy->powered = false; + scoped_guard(spinlock_irqsave, &channel->lock) { + rphy->powered = false; - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; + if (rcar_gen3_are_all_rphys_power_off(channel)) { + u32 val = readl(channel->base + USB2_USBCTR); + + val |= USB2_USBCTR_PLL_RST; + writel(val, channel->base + USB2_USBCTR); + } + } if (channel->vbus) ret = regulator_disable(channel->vbus); -out: - mutex_unlock(&channel->lock); - return ret; } @@ -703,7 +706,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - int ret = 0, i; + int ret = 0, i, irq; if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -719,8 +722,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) return PTR_ERR(channel->base); channel->obint_enable_bits = USB2_OBINT_BITS; - /* get irq number here and request_irq for OTG in phy_init */ - channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { channel->is_otg_channel = true; @@ -763,7 +764,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (phy_data->no_adp_ctrl) channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; - mutex_init(&channel->lock); + spin_lock_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { channel->rphys[i].phy = devm_phy_create(dev, NULL, phy_data->phy_usb2_ops); @@ -789,6 +790,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; } + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) { + ret = irq; + goto error; + } else if (irq > 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(dev), channel); + if (ret < 0) { + dev_err(dev, "Failed to 
request irq (%d)\n", irq); + goto error; + } + } + provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); if (IS_ERR(provider)) { dev_err(dev, "Failed to register PHY provider\n"); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c index 08c78c1bafc9..28a052e17366 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c @@ -1653,7 +1653,7 @@ static __maybe_unused int samsung_mipi_dcphy_runtime_resume(struct device *dev) return ret; } - clk_prepare_enable(samsung->ref_clk); + ret = clk_prepare_enable(samsung->ref_clk); if (ret) { dev_err(samsung->dev, "Failed to enable reference clock, %d\n", ret); clk_disable_unprepare(samsung->pclk); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index fe7c05748356..77236f012a1f 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -476,6 +476,8 @@ static const struct ropll_config ropll_tmds_cfg[] = { 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, + { 502500, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5, + 4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5, 1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, }, { 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0, diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c index cb5454fbe2c8..b505d89860b4 100644 --- a/drivers/phy/starfive/phy-jh7110-usb.c +++ b/drivers/phy/starfive/phy-jh7110-usb.c @@ -18,6 +18,8 @@ #include <linux/usb/of.h> #define USB_125M_CLK_RATE 125000000 +#define USB_CLK_MODE_OFF 0x0 +#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1) #define USB_LS_KEEPALIVE_OFF 0x4 #define USB_LS_KEEPALIVE_ENABLE BIT(4) @@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy) { struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy); int ret; + unsigned int val; ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE); if (ret) @@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy) if (ret) return ret; + val = readl(phy->regs + USB_CLK_MODE_OFF); + val |= USB_CLK_MODE_RX_NORMAL_PWR; + writel(val, phy->regs + USB_CLK_MODE_OFF); + return 0; } diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index fae6242aa730..23a23f2d64e5 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -237,6 +237,8 @@ #define DATA0_VAL_PD BIT(1) #define USE_XUSB_AO BIT(4) +#define TEGRA_UTMI_PAD_MAX 4 + #define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \ { \ .name = _name, \ @@ -269,7 +271,7 @@ struct tegra186_xusb_padctl { /* UTMI bias and tracking */ struct clk *usb2_trk_clk; - unsigned int bias_pad_enable; + DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX); /* padctl context */ struct tegra186_xusb_padctl_context context; @@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) u32 value; int err; - mutex_lock(&padctl->lock); - - if (priv->bias_pad_enable++ > 0) { - mutex_unlock(&padctl->lock); + if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) return; - } err = clk_prepare_enable(priv->usb2_trk_clk); if (err < 0) @@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl 
*padctl) } else { clk_disable_unprepare(priv->usb2_trk_clk); } - - mutex_unlock(&padctl->lock); } static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) @@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); u32 value; - mutex_lock(&padctl->lock); - - if (WARN_ON(priv->bias_pad_enable == 0)) { - mutex_unlock(&padctl->lock); - return; - } - - if (--priv->bias_pad_enable > 0) { - mutex_unlock(&padctl->lock); + if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX)) return; - } value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1); value |= USB2_PD_TRK; @@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) clk_disable_unprepare(priv->usb2_trk_clk); } - mutex_unlock(&padctl->lock); } static void tegra186_utmi_pad_power_on(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); struct tegra_xusb_usb2_port *port; struct device *dev = padctl->dev; unsigned int index = lane->index; @@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) if (!phy) return; + mutex_lock(&padctl->lock); + if (test_bit(index, priv->utmi_pad_enabled)) { + mutex_unlock(&padctl->lock); + return; + } + port = tegra_xusb_find_usb2_port(padctl, index); if (!port) { dev_err(dev, "no port found for USB2 lane %u\n", index); + mutex_unlock(&padctl->lock); return; } @@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy) value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); value &= ~USB2_OTG_PD_DR; padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index)); + + set_bit(index, priv->utmi_pad_enabled); + mutex_unlock(&padctl->lock); } static void tegra186_utmi_pad_power_down(struct phy *phy) { struct tegra_xusb_lane *lane = phy_get_drvdata(phy); struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl); unsigned int index = lane->index; u32 value; if (!phy) return; + mutex_lock(&padctl->lock); + if (!test_bit(index, priv->utmi_pad_enabled)) { + mutex_unlock(&padctl->lock); + return; + } + dev_dbg(padctl->dev, "power down UTMI pad %u\n", index); value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index)); @@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy) udelay(2); + clear_bit(index, priv->utmi_pad_enabled); + tegra186_utmi_bias_pad_power_off(padctl); + + mutex_unlock(&padctl->lock); } static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 79d4814d758d..c89df95aa6ca 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port, err = dev_set_name(&port->dev, "%s-%u", name, index); if (err < 0) - goto unregister; + goto put_device; err = device_add(&port->dev); if (err < 0) - goto unregister; + goto put_device; return 0; -unregister: - device_unregister(&port->dev); +put_device: + put_device(&port->dev); return err; } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 82f0cc43bbf4..0eb816395dc6 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -44,7 +44,6 @@ * @pctrl: 
pinctrl handle. * @chip: gpiochip handle. * @desc: pin controller descriptor - * @restart_nb: restart notifier block. * @irq: parent irq for the TLMM irq_chip. * @intr_target_use_scm: route irq to application cpu using scm calls * @lock: Spinlock to protect register resources as well @@ -64,7 +63,6 @@ struct msm_pinctrl { struct pinctrl_dev *pctrl; struct gpio_chip chip; struct pinctrl_desc desc; - struct notifier_block restart_nb; int irq; @@ -1471,10 +1469,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return 0; } -static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, - void *data) +static int msm_ps_hold_restart(struct sys_off_data *data) { - struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); + struct msm_pinctrl *pctrl = data->cb_data; writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); mdelay(1000); @@ -1485,7 +1482,11 @@ static struct msm_pinctrl *poweroff_pctrl; static void msm_ps_hold_poweroff(void) { - msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); + struct sys_off_data data = { + .cb_data = poweroff_pctrl, + }; + + msm_ps_hold_restart(&data); } static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) @@ -1495,9 +1496,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { - pctrl->restart_nb.notifier_call = msm_ps_hold_restart; - pctrl->restart_nb.priority = 128; - if (register_restart_handler(&pctrl->restart_nb)) + if (devm_register_sys_off_handler(pctrl->dev, + SYS_OFF_MODE_RESTART, + 128, + msm_ps_hold_restart, + pctrl)) dev_err(pctrl->dev, "failed to setup restart handler.\n"); poweroff_pctrl = pctrl; @@ -1599,8 +1602,6 @@ void msm_pinctrl_remove(struct platform_device *pdev) struct msm_pinctrl *pctrl = platform_get_drvdata(pdev); gpiochip_remove(&pctrl->chip); - - unregister_restart_handler(&pctrl->restart_nb); } EXPORT_SYMBOL(msm_pinctrl_remove); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index c273855fbb18..ce804438c32d 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -315,6 +315,8 @@ struct ptp_ocp_serial_port { #define OCP_BOARD_ID_LEN 13 #define OCP_SERIAL_LEN 6 #define OCP_SMA_NUM 4 +#define OCP_SIGNAL_NUM 4 +#define OCP_FREQ_NUM 4 enum { PORT_GNSS, @@ -342,8 +344,8 @@ struct ptp_ocp { struct dcf_master_reg __iomem *dcf_out; struct dcf_slave_reg __iomem *dcf_in; struct tod_reg __iomem *nmea_out; - struct frequency_reg __iomem *freq_in[4]; - struct ptp_ocp_ext_src *signal_out[4]; + struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM]; + struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM]; struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; @@ -378,10 +380,12 @@ struct ptp_ocp { u32 utc_tai_offset; u32 ts_window_adjust; u64 fw_cap; - struct ptp_ocp_signal signal[4]; + struct ptp_ocp_signal signal[OCP_SIGNAL_NUM]; struct ptp_ocp_sma_connector sma[OCP_SMA_NUM]; const struct ocp_sma_op *sma_op; struct dpll_device *dpll; + int signals_nr; + int freq_in_nr; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -2697,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); bp->sma_op = &ocp_fb_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4; ptp_ocp_fb_set_version(bp); @@ -2862,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->fw_version = ioread32(&bp->reg->version); bp->fw_tag = 2; bp->sma_op = &ocp_art_sma_op; + 
bp->signals_nr = 4; + bp->freq_in_nr = 4; /* Enable MAC serial port during initialisation */ iowrite32(1, &bp->board_config->mro50_serial_activate); @@ -2888,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->flash_start = 0xA00000; bp->eeprom_map = fb_eeprom_map; bp->sma_op = &ocp_adva_sma_op; + bp->signals_nr = 2; + bp->freq_in_nr = 2; version = ioread32(&bp->image->version); /* if lower 16 bits are empty, this is the fw loader. */ @@ -4008,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) { struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; struct ptp_ocp_signal *signal = &bp->signal[nr]; - char label[8]; + char label[16]; bool on; u32 val; @@ -4031,7 +4041,7 @@ static void _frequency_summary_show(struct seq_file *s, int nr, struct frequency_reg __iomem *reg) { - char label[8]; + char label[16]; bool on; u32 val; @@ -4175,11 +4185,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) } if (bp->fw_cap & OCP_CAP_SIGNAL) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->signals_nr; i++) _signal_summary_show(s, bp, i); if (bp->fw_cap & OCP_CAP_FREQ) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->freq_in_nr; i++) _frequency_summary_show(s, i, bp->freq_in[i]); if (bp->irig_out) { diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index 59eb23d467ec..198d45f8e884 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip) static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) { - struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { }; + struct of_regulator_match *matches; struct device_node *node; unsigned int i; int ret; @@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) return -ENODEV; } + matches = devm_kcalloc(chip->dev, chip->info->num_outputs, + sizeof(*matches), GFP_KERNEL); + if (!matches) + return -ENOMEM; + for (i = 0; i < chip->info->num_outputs; ++i) matches[i].name = max20086_output_names[i]; diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 775b056d795a..2c7e519a2254 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -456,7 +456,8 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, if (wcnss->num_pds) { info += wcnss->num_pds; /* Handle single power domain case */ - num_vregs += num_pd_vregs - wcnss->num_pds; + if (wcnss->num_pds < num_pd_vregs) + num_vregs += num_pd_vregs - wcnss->num_pds; } else { num_vregs += num_pd_vregs; } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 7a447ff600d2..a8db66428f80 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = sdkp->disk->queue; + unsigned int max_segments; size_t bufsize; void *buf; @@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, * Furthermore, since the report zone command cannot be split, make * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. + * Since max segments can be larger than the max inline bio vectors, + * further limit the allocated buffer to BIO_MAX_INLINE_VECS. 
*/ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); - bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q)); + bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 6f8a20014e76..39aecd34c641 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -122,6 +122,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, set_bit(SDW_GROUP13_DEV_NUM, bus->assigned); set_bit(SDW_MASTER_DEV_NUM, bus->assigned); + ret = sdw_irq_create(bus, fwnode); + if (ret) + return ret; + /* * SDW is an enumerable bus, but devices can be powered off. So, * they won't be able to report as present. @@ -138,6 +142,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, if (ret < 0) { dev_err(bus->dev, "Finding slaves failed:%d\n", ret); + sdw_irq_delete(bus); return ret; } @@ -156,10 +161,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, bus->params.curr_bank = SDW_BANK0; bus->params.next_bank = SDW_BANK1; - ret = sdw_irq_create(bus, fwnode); - if (ret) - return ret; - return 0; } EXPORT_SYMBOL(sdw_bus_master_add); diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 31a878d9458d..7740f94847a8 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -420,7 +420,7 @@ MODULE_LICENSE("GPL"); static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) { /* limit the hex_dump */ - if (len < 1024) { + if (len <= 1024) { print_hex_dump(KERN_INFO, pre, DUMP_PREFIX_OFFSET, 16, 1, ptr, len, 0); diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index f89826d7dc49..aa92fd5a35a9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 2a8bb798e95b..795a8482c2c7 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) u32 inactive_cycles; u8 cs_state; - if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) || - (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) || - (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) { + if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) || + (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) || + (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) { dev_err(&spi->dev, "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", SPI_DELAY_UNIT_SCK); diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c index 12e866fb311d..0a800ba53816 100644 --- a/drivers/usb/gadget/function/f_midi2.c +++ b/drivers/usb/gadget/function/f_midi2.c @@ -475,7 
+475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep) /* reply a UMP EP device info */ static void reply_ump_stream_ep_device(struct f_midi2_ep *ep) { - struct snd_ump_stream_msg_devince_info rep = { + struct snd_ump_stream_msg_device_info rep = { .type = UMP_MSG_TYPE_STREAM, .status = UMP_STREAM_MSG_STATUS_DEVICE_INFO, .manufacture_id = ep->info.manufacturer, diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index ff26bb515150..5f195d2280a4 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -192,7 +192,8 @@ static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans, static int backpointer_target_not_found(struct btree_trans *trans, struct bkey_s_c_backpointer bp, struct bkey_s_c target_k, - struct bkey_buf *last_flushed) + struct bkey_buf *last_flushed, + bool commit) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; @@ -228,18 +229,77 @@ static int backpointer_target_not_found(struct btree_trans *trans, } if (fsck_err(trans, backpointer_to_missing_ptr, - "%s", buf.buf)) + "%s", buf.buf)) { ret = bch2_backpointer_del(trans, bp.k->p); + if (ret || !commit) + goto out; + + /* + * Normally, on transaction commit from inside a transaction, + * we'll return -BCH_ERR_transaction_restart_nested, since a + * transaction commit invalidates pointers given out by peek(). + * + * However, since we're updating a write buffer btree, if we + * return a transaction restart and loop we won't see that the + * backpointer has been deleted without an additional write + * buffer flush - and those are expensive. + * + * So we're relying on the caller immediately advancing to the + * next backpointer and starting a new transaction immediately + * after backpointer_get_key() returns NULL: + */ + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); + } +out: fsck_err: printbuf_exit(&buf); return ret; } -struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, - struct bkey_s_c_backpointer bp, - struct btree_iter *iter, - unsigned iter_flags, - struct bkey_buf *last_flushed) +static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, + struct btree_iter *iter, + struct bkey_buf *last_flushed, + bool commit) +{ + struct bch_fs *c = trans->c; + + BUG_ON(!bp.v->level); + + bch2_trans_node_iter_init(trans, iter, + bp.v->btree_id, + bp.v->pos, + 0, + bp.v->level - 1, + 0); + struct btree *b = bch2_btree_iter_peek_node(trans, iter); + if (IS_ERR_OR_NULL(b)) + goto err; + + BUG_ON(b->c.level != bp.v->level - 1); + + if (extent_matches_bp(c, bp.v->btree_id, bp.v->level, + bkey_i_to_s_c(&b->key), bp)) + return b; + + if (btree_node_will_make_reachable(b)) { + b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); + } else { + int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key), + last_flushed, commit); + b = ret ? 
ERR_PTR(ret) : NULL; + } +err: + bch2_trans_iter_exit(trans, iter); + return b; +} + +static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, + struct btree_iter *iter, + unsigned iter_flags, + struct bkey_buf *last_flushed, + bool commit) { struct bch_fs *c = trans->c; @@ -277,10 +337,10 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, bch2_trans_iter_exit(trans, iter); if (!bp.v->level) { - int ret = backpointer_target_not_found(trans, bp, k, last_flushed); + int ret = backpointer_target_not_found(trans, bp, k, last_flushed, commit); return ret ? bkey_s_c_err(ret) : bkey_s_c_null; } else { - struct btree *b = bch2_backpointer_get_node(trans, bp, iter, last_flushed); + struct btree *b = __bch2_backpointer_get_node(trans, bp, iter, last_flushed, commit); if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node)) return bkey_s_c_null; if (IS_ERR_OR_NULL(b)) @@ -295,35 +355,16 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans, struct btree_iter *iter, struct bkey_buf *last_flushed) { - struct bch_fs *c = trans->c; - - BUG_ON(!bp.v->level); - - bch2_trans_node_iter_init(trans, iter, - bp.v->btree_id, - bp.v->pos, - 0, - bp.v->level - 1, - 0); - struct btree *b = bch2_btree_iter_peek_node(trans, iter); - if (IS_ERR_OR_NULL(b)) - goto err; - - BUG_ON(b->c.level != bp.v->level - 1); - - if (extent_matches_bp(c, bp.v->btree_id, bp.v->level, - bkey_i_to_s_c(&b->key), bp)) - return b; + return __bch2_backpointer_get_node(trans, bp, iter, last_flushed, true); +} - if (btree_node_will_make_reachable(b)) { - b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); - } else { - int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key), last_flushed); - b = ret ? 
ERR_PTR(ret) : NULL; - } -err: - bch2_trans_iter_exit(trans, iter); - return b; +struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, + struct btree_iter *iter, + unsigned iter_flags, + struct bkey_buf *last_flushed) +{ + return __bch2_backpointer_get_key(trans, bp, iter, iter_flags, last_flushed, true); } static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, struct bkey_s_c k, @@ -521,7 +562,7 @@ check_existing_bp: struct bkey_s_c_backpointer other_bp = bkey_s_c_to_backpointer(bp_k); struct bkey_s_c other_extent = - bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0, NULL); + __bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0, NULL, false); ret = bkey_err(other_extent); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) ret = 0; diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 9b80201c7982..899891295797 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -852,7 +852,6 @@ out: b->sib_u64s[1] = 0; b->whiteout_u64s = 0; bch2_btree_keys_init(b); - set_btree_node_accessed(b); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc], start_time); @@ -1286,6 +1285,10 @@ lock_node: six_unlock_read(&b->c.lock); goto retry; } + + /* avoid atomic set bit if it's not needed: */ + if (!btree_node_accessed(b)) + set_btree_node_accessed(b); } /* XXX: waiting on IO with btree locks held: */ @@ -1301,10 +1304,6 @@ lock_node: prefetch(p + L1_CACHE_BYTES * 2); } - /* avoid atomic set bit if it's not needed: */ - if (!btree_node_accessed(b)) - set_btree_node_accessed(b); - if (unlikely(btree_node_read_error(b))) { six_unlock_read(&b->c.lock); b = ERR_PTR(-BCH_ERR_btree_node_read_err_cached); diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 59fa527ac685..a873ec1baf58 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -1971,6 +1971,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_ return NULL; } + /* + * We don't correctly handle nodes with extra intent locks here: + * downgrade so we don't violate locking invariants + */ + bch2_btree_path_downgrade(trans, path); + if (!bch2_btree_node_relock(trans, path, path->level + 1)) { __bch2_btree_path_unlock(trans, path); path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock); @@ -2743,7 +2749,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre ret = trans_maybe_inject_restart(trans, _RET_IP_); if (unlikely(ret)) { k = bkey_s_c_err(ret); - goto out_no_locked; + goto out; } /* extents can't span inode numbers: */ @@ -2763,13 +2769,15 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { k = bkey_s_c_err(ret); - goto out_no_locked; + goto out; } struct btree_path *path = btree_iter_path(trans, iter); if (unlikely(!btree_path_node(path, path->level))) return bkey_s_c_null; + btree_path_set_should_be_locked(trans, path); + if ((iter->flags & BTREE_ITER_cached) || !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) { k = bkey_s_c_null; @@ -2790,12 +2798,12 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre if (!bkey_err(k)) iter->k = *k.k; /* We're not returning a key from iter->path: */ - goto out_no_locked; + goto out; } - k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k); + k = 
bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k); if (unlikely(!k.k)) - goto out_no_locked; + goto out; if (unlikely(k.k->type == KEY_TYPE_whiteout && (iter->flags & BTREE_ITER_filter_snapshots) && @@ -2833,7 +2841,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre } if (unlikely(bkey_err(k))) - goto out_no_locked; + goto out; next = k.k ? bkey_start_pos(k.k) : POS_MAX; @@ -2855,8 +2863,6 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre } } out: - btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter)); -out_no_locked: bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(trans, iter); ret = bch2_btree_iter_verify_ret(trans, iter, k); diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index b007319b72e9..1f0422bfae35 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -376,6 +376,19 @@ int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, return ret; } +int bch2_accounting_mem_insert_locked(struct bch_fs *c, struct bkey_s_c_accounting a, + enum bch_accounting_mode mode) +{ + struct bch_replicas_padded r; + + if (mode != BCH_ACCOUNTING_read && + accounting_to_replicas(&r.e, a.k->p) && + !bch2_replicas_marked_locked(c, &r.e)) + return -BCH_ERR_btree_insert_need_mark_replicas; + + return __bch2_accounting_mem_insert(c, a); +} + static bool accounting_mem_entry_is_zero(struct accounting_mem_entry *e) { for (unsigned i = 0; i < e->nr_counters; i++) @@ -583,7 +596,7 @@ int bch2_gc_accounting_done(struct bch_fs *c) accounting_key_init(&k_i.k, &acc_k, src_v, nr); bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), - BCH_ACCOUNTING_normal); + BCH_ACCOUNTING_normal, true); preempt_disable(); struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); @@ -612,7 +625,7 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k) percpu_down_read(&c->mark_lock); int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), - BCH_ACCOUNTING_read); + BCH_ACCOUNTING_read, false); percpu_up_read(&c->mark_lock); return ret; } diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h index abb1f6206fe9..d557b99b3c0a 100644 --- a/fs/bcachefs/disk_accounting.h +++ b/fs/bcachefs/disk_accounting.h @@ -136,6 +136,7 @@ enum bch_accounting_mode { }; int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode); +int bch2_accounting_mem_insert_locked(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode); void bch2_accounting_mem_gc(struct bch_fs *); static inline bool bch2_accounting_is_mem(struct disk_accounting_pos acc) @@ -150,7 +151,8 @@ static inline bool bch2_accounting_is_mem(struct disk_accounting_pos acc) */ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, - enum bch_accounting_mode mode) + enum bch_accounting_mode mode, + bool write_locked) { struct bch_fs *c = trans->c; struct bch_accounting_mem *acc = &c->accounting; @@ -189,7 +191,11 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, &a.k->p)) >= acc->k.nr) { - int ret = bch2_accounting_mem_insert(c, a, mode); + int ret = 0; + if (unlikely(write_locked)) + ret = bch2_accounting_mem_insert_locked(c, a, mode); + else + ret = bch2_accounting_mem_insert(c, a, mode); if 
(ret) return ret; } @@ -206,7 +212,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc) { percpu_down_read(&trans->c->mark_lock); - int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal); + int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal, false); percpu_up_read(&trans->c->mark_lock); return ret; } @@ -259,7 +265,7 @@ static inline int bch2_accounting_trans_commit_hook(struct btree_trans *trans, EBUG_ON(bversion_zero(a->k.bversion)); return likely(!(commit_flags & BCH_TRANS_COMMIT_skip_accounting_apply)) - ? bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal) + ? bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal, false) : 0; } @@ -271,7 +277,7 @@ static inline void bch2_accounting_trans_commit_revert(struct btree_trans *trans struct bkey_s_accounting a = accounting_i_to_s(a_i); bch2_accounting_neg(a); - bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal); + bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal, false); bch2_accounting_neg(a); } } diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index b6801861c66f..4b742e62255b 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -1429,7 +1429,9 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, if (ret) goto err; - ret = bch2_next_fiemap_pagecache_extent(trans, inode, start, end, cur); + u64 pagecache_end = k.k ? max(start, bkey_start_offset(k.k)) : end; + + ret = bch2_next_fiemap_pagecache_extent(trans, inode, start, pagecache_end, cur); if (ret) goto err; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 7b25cedd3e40..71d428f376a5 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -2446,7 +2446,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, u32 parent = le32_to_cpu(s.v->fs_path_parent); if (darray_u32_has(&subvol_path, parent)) { - if (fsck_err(c, subvol_loop, "subvolume loop")) + if (fsck_err(trans, subvol_loop, "subvolume loop")) ret = reattach_subvol(trans, s); break; } diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index 976464d8a695..cc00b0fc40d8 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -17,6 +17,8 @@ #include <linux/kthread.h> #include <linux/sched/mm.h> +static bool __should_discard_bucket(struct journal *, struct journal_device *); + /* Free space calculations: */ static unsigned journal_space_from(struct journal_device *ja, @@ -203,8 +205,7 @@ void bch2_journal_space_available(struct journal *j) ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk) ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr; - if (ja->discard_idx != ja->dirty_idx_ondisk) - can_discard = true; + can_discard |= __should_discard_bucket(j, ja); max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size); nr_online++; @@ -264,13 +265,19 @@ out: /* Discards - last part of journal reclaim: */ -static bool should_discard_bucket(struct journal *j, struct journal_device *ja) +static bool __should_discard_bucket(struct journal *j, struct journal_device *ja) { - spin_lock(&j->lock); unsigned min_free = max(4, ja->nr / 8); - bool ret = bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) < min_free && + return bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) 
< + min_free && ja->discard_idx != ja->dirty_idx_ondisk; +} + +static bool should_discard_bucket(struct journal *j, struct journal_device *ja) +{ + spin_lock(&j->lock); + bool ret = __should_discard_bucket(j, ja); spin_unlock(&j->lock); return ret; diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index 4ccdfc1f34aa..623273556aa9 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -309,7 +309,7 @@ static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { - if (!bch2_bkey_rebalance_opts(k)) + if (k.k->type == KEY_TYPE_reflink_v || !bch2_bkey_rebalance_opts(k)) return 0; struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 02c916a55020..6d63b958c4bb 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1105,6 +1105,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc) if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; } + /* Linux 'subtree_check' borkenness mandates this setting */ + server->fh_expire_type = NFS_FH_VOL_RENAME; if (!(fattr->valid & NFS_ATTR_FATTR)) { error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh, @@ -1200,6 +1202,10 @@ void nfs_clients_init(struct net *net) #if IS_ENABLED(CONFIG_NFS_V4) idr_init(&nn->cb_ident_idr); #endif +#if IS_ENABLED(CONFIG_NFS_V4_1) + INIT_LIST_HEAD(&nn->nfs4_data_server_cache); + spin_lock_init(&nn->nfs4_data_server_lock); +#endif spin_lock_init(&nn->nfs_client_lock); nn->boot_time = ktime_get_real(); memset(&nn->rpcstats, 0, sizeof(nn->rpcstats)); @@ -1216,6 +1222,9 @@ void nfs_clients_exit(struct net *net) nfs_cleanup_cb_ident_idr(net); WARN_ON_ONCE(!list_empty(&nn->nfs_client_list)); WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list)); +#if IS_ENABLED(CONFIG_NFS_V4_1) + WARN_ON_ONCE(!list_empty(&nn->nfs4_data_server_cache)); +#endif } #ifdef CONFIG_PROC_FS diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index bd23fc736b39..d0e0b435a843 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2676,6 +2676,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) unblock_revalidate(new_dentry); } +static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry, + struct dentry *new_dentry) +{ + struct nfs_server *server = NFS_SB(old_dentry->d_sb); + + if (old_dentry->d_parent != new_dentry->d_parent) + return false; + if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE) + return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN); + return true; +} + /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a @@ -2763,7 +2775,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } - if (S_ISREG(old_inode->i_mode)) + if (S_ISREG(old_inode->i_mode) && + nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry)) nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, must_unblock ? 
nfs_unblock_rename : NULL); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index f32f8d7c9122..48d89716193a 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -757,7 +757,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) { struct nfs_direct_req *dreq = hdr->dreq; struct nfs_commit_info cinfo; - struct nfs_page *req = nfs_list_entry(hdr->pages.next); struct inode *inode = dreq->inode; int flags = NFS_ODIRECT_DONE; @@ -786,6 +785,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) spin_unlock(&inode->i_lock); while (!list_empty(&hdr->pages)) { + struct nfs_page *req; req = nfs_list_entry(hdr->pages.next); nfs_list_remove_request(req); diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index 4fa304fa5bc4..29d9234d5c08 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -76,6 +76,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct page *scratch; struct list_head dsaddrs; struct nfs4_pnfs_ds_addr *da; + struct net *net = server->nfs_client->cl_net; /* set up xdr stream */ scratch = alloc_page(gfp_flags); @@ -159,8 +160,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -170,7 +170,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, goto out_err_free_deviceid; } - dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!dsaddr->ds_list[i]) goto out_err_drain_dsaddrs; trace_fl_getdevinfo(server, &pdev->dev_id, dsaddr->ds_list[i]->ds_remotestr); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 61ad269c825f..e6909cafab68 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1329,7 +1329,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task, hdr->args.offset, hdr->args.count, &hdr->res.op_status, OP_READ, task->tk_status); - trace_ff_layout_read_error(hdr); + trace_ff_layout_read_error(hdr, task->tk_status); } err = ff_layout_async_handle_error(task, hdr->args.context->state, @@ -1502,7 +1502,7 @@ static int ff_layout_write_done_cb(struct rpc_task *task, hdr->args.offset, hdr->args.count, &hdr->res.op_status, OP_WRITE, task->tk_status); - trace_ff_layout_write_error(hdr); + trace_ff_layout_write_error(hdr, task->tk_status); } err = ff_layout_async_handle_error(task, hdr->args.context->state, @@ -1551,7 +1551,7 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, data->args.offset, data->args.count, &data->res.op_status, OP_COMMIT, task->tk_status); - trace_ff_layout_commit_error(data); + trace_ff_layout_commit_error(data, task->tk_status); } err = ff_layout_async_handle_error(task, NULL, data->ds_clp, diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e58bedfb1dcc..4a304cf17c4b 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -49,6 +49,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct nfs4_pnfs_ds_addr *da; struct nfs4_ff_layout_ds *new_ds = NULL; struct nfs4_ff_ds_version 
*ds_versions = NULL; + struct net *net = server->nfs_client->cl_net; u32 mp_count; u32 version_count; __be32 *p; @@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, for (i = 0; i < mp_count; i++) { /* multipath ds */ - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, new_ds->ds_versions = ds_versions; new_ds->ds_versions_cnt = version_count; - new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!new_ds->ds) goto out_err_drain_dsaddrs; diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c index 5c21caeae075..4ec952f9f47d 100644 --- a/fs/nfs/localio.c +++ b/fs/nfs/localio.c @@ -278,6 +278,7 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, new = __nfs_local_open_fh(clp, cred, fh, nfl, mode); if (IS_ERR(new)) return NULL; + rcu_read_lock(); /* try to swap in the pointer */ spin_lock(&clp->cl_uuid.lock); nf = rcu_dereference_protected(*pnf, 1); @@ -287,7 +288,6 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, rcu_assign_pointer(*pnf, nf); } spin_unlock(&clp->cl_uuid.lock); - rcu_read_lock(); } nf = nfs_local_file_get(nf); rcu_read_unlock(); diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index a68b21603ea9..6ba3ea39e928 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -31,7 +31,11 @@ struct nfs_net { unsigned short nfs_callback_tcpport; unsigned short nfs_callback_tcpport6; int cb_users[NFS4_MAX_MINOR_VERSION + 1]; -#endif +#endif /* CONFIG_NFS_V4 */ +#if IS_ENABLED(CONFIG_NFS_V4_1) + struct list_head nfs4_data_server_cache; + spinlock_t nfs4_data_server_lock; +#endif /* CONFIG_NFS_V4_1 */ struct nfs_netns_client *nfs_client; spinlock_t nfs_client_lock; ktime_t boot_time; diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 18d8f6529f61..a126eb31f62f 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -104,7 +104,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type, bool rcu) switch (status) { case 0: - status = nfs_refresh_inode(inode, res.fattr); + nfs_refresh_inode(inode, res.fattr); break; case -EPFNOSUPPORT: case -EPROTONOSUPPORT: diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 970f28dbf253..b1d2122bd5a7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -671,6 +671,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, struct nfs_client *clp = server->nfs_client; int ret; + if ((task->tk_rpc_status == -ENETDOWN || + task->tk_rpc_status == -ENETUNREACH) && + task->tk_flags & RPC_TASK_NETUNREACH_FATAL) { + exception->delay = 0; + exception->recovering = 0; + exception->retry = 0; + return -EIO; + } + ret = nfs4_do_handle_exception(server, errorcode, exception); if (exception->delay) { int ret2 = nfs4_exception_should_retrans(server, exception); @@ -7074,10 +7083,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, struct nfs4_unlockdata *p; struct nfs4_state *state = lsp->ls_state; struct inode *inode = state->inode; + struct nfs_lock_context *l_ctx; p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return NULL; + l_ctx = nfs_get_lock_context(ctx); + if (!IS_ERR(l_ctx)) { + p->l_ctx = l_ctx; + } else { + kfree(p); + return NULL; + } p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.seqid = seqid; @@ -7085,7 +7102,6 @@ 
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, p->lsp = lsp; /* Ensure we don't close file until we're done freeing locks! */ p->ctx = get_nfs_open_context(ctx); - p->l_ctx = nfs_get_lock_context(ctx); locks_init_lock(&p->fl); locks_copy_lock(&p->fl, fl); p->server = NFS_SERVER(inode); diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index bc67fe6801b1..deab4c0e21a0 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -2051,13 +2051,15 @@ TRACE_EVENT(fl_getdevinfo, DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event, TP_PROTO( - const struct nfs_pgio_header *hdr + const struct nfs_pgio_header *hdr, + int error ), - TP_ARGS(hdr), + TP_ARGS(hdr, error), TP_STRUCT__entry( __field(unsigned long, error) + __field(unsigned long, nfs_error) __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) @@ -2073,7 +2075,8 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event, TP_fast_assign( const struct inode *inode = hdr->inode; - __entry->error = hdr->res.op_status; + __entry->error = -error; + __entry->nfs_error = hdr->res.op_status; __entry->fhandle = nfs_fhandle_hash(hdr->args.fh); __entry->fileid = NFS_FILEID(inode); __entry->dev = inode->i_sb->s_dev; @@ -2088,7 +2091,8 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event, TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " - "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s", + "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s " + "nfs_error=%lu (%s)", -__entry->error, show_nfs4_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), @@ -2096,28 +2100,32 @@ DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event, __entry->fhandle, __entry->offset, __entry->count, __entry->stateid_seq, __entry->stateid_hash, - __get_str(dstaddr) + __get_str(dstaddr), __entry->nfs_error, + show_nfs4_status(__entry->nfs_error) ) ); #define DEFINE_NFS4_FLEXFILES_IO_EVENT(name) \ DEFINE_EVENT(nfs4_flexfiles_io_event, name, \ TP_PROTO( \ - const struct nfs_pgio_header *hdr \ + const struct nfs_pgio_header *hdr, \ + int error \ ), \ - TP_ARGS(hdr)) + TP_ARGS(hdr, error)) DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_read_error); DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_write_error); TRACE_EVENT(ff_layout_commit_error, TP_PROTO( - const struct nfs_commit_data *data + const struct nfs_commit_data *data, + int error ), - TP_ARGS(data), + TP_ARGS(data, error), TP_STRUCT__entry( __field(unsigned long, error) + __field(unsigned long, nfs_error) __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) @@ -2131,7 +2139,8 @@ TRACE_EVENT(ff_layout_commit_error, TP_fast_assign( const struct inode *inode = data->inode; - __entry->error = data->res.op_status; + __entry->error = -error; + __entry->nfs_error = data->res.op_status; __entry->fhandle = nfs_fhandle_hash(data->args.fh); __entry->fileid = NFS_FILEID(inode); __entry->dev = inode->i_sb->s_dev; @@ -2142,14 +2151,15 @@ TRACE_EVENT(ff_layout_commit_error, TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " - "offset=%llu count=%u dstaddr=%s", + "offset=%llu count=%u dstaddr=%s nfs_error=%lu (%s)", -__entry->error, show_nfs4_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->offset, __entry->count, - __get_str(dstaddr) + __get_str(dstaddr), __entry->nfs_error, + show_nfs4_status(__entry->nfs_error) ) ); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 5f582713bf05..3adb7d0dbec7 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -745,6 +745,14 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr 
*lo, return remaining; } +static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo) +{ + struct pnfs_layout_segment *lseg; + + list_for_each_entry(lseg, &lo->plh_return_segs, pls_list) + pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0); +} + static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo, struct list_head *free_me, @@ -1246,21 +1254,15 @@ static void pnfs_clear_layoutcommit(struct inode *inode, static void pnfs_layoutreturn_retry_later_locked(struct pnfs_layout_hdr *lo, const nfs4_stateid *arg_stateid, - const struct pnfs_layout_range *range) + const struct pnfs_layout_range *range, + struct list_head *freeme) { - const struct pnfs_layout_segment *lseg; - u32 seq = be32_to_cpu(arg_stateid->seqid); - if (pnfs_layout_is_valid(lo) && - nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) { - list_for_each_entry(lseg, &lo->plh_return_segs, pls_list) { - if (pnfs_seqid_is_newer(lseg->pls_seq, seq) || - !pnfs_should_free_range(&lseg->pls_range, range)) - continue; - pnfs_set_plh_return_info(lo, range->iomode, seq); - break; - } - } + nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid)) + pnfs_reset_return_info(lo); + else + pnfs_mark_layout_stateid_invalid(lo, freeme); + pnfs_clear_layoutreturn_waitbit(lo); } void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo, @@ -1268,11 +1270,12 @@ void pnfs_layoutreturn_retry_later(struct pnfs_layout_hdr *lo, const struct pnfs_layout_range *range) { struct inode *inode = lo->plh_inode; + LIST_HEAD(freeme); spin_lock(&inode->i_lock); - pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range); - pnfs_clear_layoutreturn_waitbit(lo); + pnfs_layoutreturn_retry_later_locked(lo, arg_stateid, range, &freeme); spin_unlock(&inode->i_lock); + pnfs_free_lseg_list(&freeme); } void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, @@ -1292,6 +1295,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq); pnfs_free_returned_lsegs(lo, &freeme, range, seq); pnfs_set_layout_stateid(lo, stateid, NULL, true); + pnfs_reset_return_info(lo); } else pnfs_mark_layout_stateid_invalid(lo, &freeme); out_unlock: @@ -1661,6 +1665,18 @@ int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp, /* Was there an RPC level error? If not, retry */ if (task->tk_rpc_status == 0) break; + /* + * Is there a fatal network level error? + * If so release the layout, but flag the error. 
+ */ + if ((task->tk_rpc_status == -ENETDOWN || + task->tk_rpc_status == -ENETUNREACH) && + task->tk_flags & RPC_TASK_NETUNREACH_FATAL) { + *ret = 0; + (*respp)->lrs_present = 0; + retval = -EIO; + break; + } /* If the call was not sent, let caller handle it */ if (!RPC_WAS_SENT(task)) return 0; @@ -1695,6 +1711,7 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, struct inode *inode = args->inode; const nfs4_stateid *res_stateid = NULL; struct nfs4_xdr_opaque_data *ld_private = args->ld_private; + LIST_HEAD(freeme); switch (ret) { case -NFS4ERR_BADSESSION: @@ -1703,9 +1720,9 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, case -NFS4ERR_NOMATCHING_LAYOUT: spin_lock(&inode->i_lock); pnfs_layoutreturn_retry_later_locked(lo, &args->stateid, - &args->range); - pnfs_clear_layoutreturn_waitbit(lo); + &args->range, &freeme); spin_unlock(&inode->i_lock); + pnfs_free_lseg_list(&freeme); break; case 0: if (res->lrs_present) diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 30d2613e912b..91ff877185c8 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -60,6 +60,7 @@ struct nfs4_pnfs_ds { struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ char *ds_remotestr; /* comma sep list of addrs */ struct list_head ds_addrs; + const struct net *ds_net; struct nfs_client *ds_clp; refcount_t ds_count; unsigned long ds_state; @@ -415,7 +416,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode, int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); -struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, +struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net, + struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_v3_ds_connect_unload(void); int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index dbef837e871a..91ef486f40b9 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -16,6 +16,7 @@ #include "nfs4session.h" #include "internal.h" #include "pnfs.h" +#include "netns.h" #define NFSDBG_FACILITY NFSDBG_PNFS @@ -504,14 +505,14 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist); /* * Data server cache * - * Data servers can be mapped to different device ids. - * nfs4_pnfs_ds reference counting + * Data servers can be mapped to different device ids, but should + * never be shared between net namespaces. + * + * nfs4_pnfs_ds reference counting: * - set to 1 on allocation * - incremented when a device id maps a data server already in the cache. * - decremented when deviceid is removed from the cache. */ -static DEFINE_SPINLOCK(nfs4_ds_cache_lock); -static LIST_HEAD(nfs4_data_server_cache); /* Debug routines */ static void @@ -604,11 +605,11 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1, * Lookup DS by addresses. 
nfs4_ds_cache_lock is held */ static struct nfs4_pnfs_ds * -_data_server_lookup_locked(const struct list_head *dsaddrs) +_data_server_lookup_locked(const struct nfs_net *nn, const struct list_head *dsaddrs) { struct nfs4_pnfs_ds *ds; - list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) + list_for_each_entry(ds, &nn->nfs4_data_server_cache, ds_node) if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) return ds; return NULL; @@ -653,10 +654,11 @@ static void destroy_ds(struct nfs4_pnfs_ds *ds) void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds) { - if (refcount_dec_and_lock(&ds->ds_count, - &nfs4_ds_cache_lock)) { + struct nfs_net *nn = net_generic(ds->ds_net, nfs_net_id); + + if (refcount_dec_and_lock(&ds->ds_count, &nn->nfs4_data_server_lock)) { list_del_init(&ds->ds_node); - spin_unlock(&nfs4_ds_cache_lock); + spin_unlock(&nn->nfs4_data_server_lock); destroy_ds(ds); } } @@ -716,8 +718,9 @@ out_err: * uncached and return cached struct nfs4_pnfs_ds. */ struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) +nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags) { + struct nfs_net *nn = net_generic(net, nfs_net_id); struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; char *remotestr; @@ -733,16 +736,17 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) /* this is only used for debugging, so it's ok if its NULL */ remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); - spin_lock(&nfs4_ds_cache_lock); - tmp_ds = _data_server_lookup_locked(dsaddrs); + spin_lock(&nn->nfs4_data_server_lock); + tmp_ds = _data_server_lookup_locked(nn, dsaddrs); if (tmp_ds == NULL) { INIT_LIST_HEAD(&ds->ds_addrs); list_splice_init(dsaddrs, &ds->ds_addrs); ds->ds_remotestr = remotestr; refcount_set(&ds->ds_count, 1); INIT_LIST_HEAD(&ds->ds_node); + ds->ds_net = net; ds->ds_clp = NULL; - list_add(&ds->ds_node, &nfs4_data_server_cache); + list_add(&ds->ds_node, &nn->nfs4_data_server_cache); dprintk("%s add new data server %s\n", __func__, ds->ds_remotestr); } else { @@ -754,7 +758,7 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) refcount_read(&tmp_ds->ds_count)); ds = tmp_ds; } - spin_unlock(&nfs4_ds_cache_lock); + spin_unlock(&nn->nfs4_data_server_lock); out: return ds; } diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 5ac743c6bc2e..08a6f372a352 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -32,12 +32,13 @@ static int orangefs_writepage_locked(struct folio *folio, len = i_size_read(inode); if (folio->private) { wr = folio->private; - WARN_ON(wr->pos >= len); off = wr->pos; - if (off + wr->len > len) + if ((off + wr->len > len) && (off <= len)) wlen = len - off; else wlen = wr->len; + if (wlen == 0) + wlen = wr->len; } else { WARN_ON(1); off = folio_pos(folio); @@ -46,8 +47,6 @@ static int orangefs_writepage_locked(struct folio *folio, if (wlen > len - off) wlen = len - off; } - /* Should've been handled in orangefs_invalidate_folio. 
*/ - WARN_ON(off == len || off + wlen > len); WARN_ON(wlen == 0); bvec_set_folio(&bv, folio, wlen, offset_in_folio(folio, off)); @@ -320,6 +319,8 @@ static int orangefs_write_begin(struct file *file, wr->len += len; goto okay; } else { + wr->pos = pos; + wr->len = len; ret = orangefs_launder_folio(folio); if (ret) return ret; diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 851b74f557c1..950aa4f912f5 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -160,8 +160,10 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq) server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); rdata->server = server; - cifs_negotiate_rsize(server, cifs_sb->ctx, - tlink_tcon(req->cfile->tlink)); + if (cifs_sb->ctx->rsize == 0) { + cifs_negotiate_rsize(server, cifs_sb->ctx, + tlink_tcon(req->cfile->tlink)); + } rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &size, &rdata->credits); diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 0b35816d551f..4e28632b5fd6 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -2968,7 +2968,7 @@ replay_again: /* Eventually save off posix specific response info and timestamps */ err_free_rsp_buf: - free_rsp_buf(resp_buftype, rsp); + free_rsp_buf(resp_buftype, rsp_iov.iov_base); kfree(pc_buf); err_free_req: cifs_small_buf_release(req); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index b2dd0c0bf509..4a11ddccc563 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1149,7 +1149,7 @@ xfs_init_percpu_counters( return 0; free_freecounters: - while (--i > 0) + while (--i >= 0) percpu_counter_destroy(&mp->m_free[i].count); percpu_counter_destroy(&mp->m_delalloc_rtextents); free_delalloc: @@ -2114,6 +2114,21 @@ xfs_fs_reconfigure( if (error) return error; + /* attr2 -> noattr2 */ + if (xfs_has_noattr2(new_mp)) { + if (xfs_has_crc(mp)) { + xfs_warn(mp, + "attr2 is always enabled for a V5 filesystem - can't be changed."); + return -EINVAL; + } + mp->m_features &= ~XFS_FEAT_ATTR2; + mp->m_features |= XFS_FEAT_NOATTR2; + } else if (xfs_has_attr2(new_mp)) { + /* noattr2 -> attr2 */ + mp->m_features &= ~XFS_FEAT_NOATTR2; + mp->m_features |= XFS_FEAT_ATTR2; + } + /* inode32 -> inode64 */ if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) { mp->m_features &= ~XFS_FEAT_SMALL_INUMS; @@ -2126,6 +2141,17 @@ xfs_fs_reconfigure( mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); } + /* + * Now that mp has been modified according to the remount options, we + * do a final option validation with xfs_finish_flags() just like it is + * done during mount. We cannot use xfs_finish_flags() on new_mp as it + * contains only the user given options. + */ + error = xfs_finish_flags(mp); + if (error) + return error; + /* ro -> rw */ if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) { error = xfs_remount_rw(mp); diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 85a649fec6ac..67c328d23e4a 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -315,7 +315,7 @@ xfs_ail_splice( } /* - * Delete the given item from the AIL. Return a pointer to the item. + * Delete the given item from the AIL. */ static void xfs_ail_delete( @@ -777,26 +777,28 @@ xfs_ail_update_finish( } /* - * xfs_trans_ail_update - bulk AIL insertion operation. + * xfs_trans_ail_update_bulk - bulk AIL insertion operation. 
* - * @xfs_trans_ail_update takes an array of log items that all need to be + * @xfs_trans_ail_update_bulk takes an array of log items that all need to be * positioned at the same LSN in the AIL. If an item is not in the AIL, it will - * be added. Otherwise, it will be repositioned by removing it and re-adding - * it to the AIL. If we move the first item in the AIL, update the log tail to - * match the new minimum LSN in the AIL. + * be added. Otherwise, it will be repositioned by removing it and re-adding + * it to the AIL. * - * This function takes the AIL lock once to execute the update operations on - * all the items in the array, and as such should not be called with the AIL - * lock held. As a result, once we have the AIL lock, we need to check each log - * item LSN to confirm it needs to be moved forward in the AIL. + * If we move the first item in the AIL, update the log tail to match the new + * minimum LSN in the AIL. * - * To optimise the insert operation, we delete all the items from the AIL in - * the first pass, moving them into a temporary list, then splice the temporary - * list into the correct position in the AIL. This avoids needing to do an - * insert operation on every item. + * This function should be called with the AIL lock held. * - * This function must be called with the AIL lock held. The lock is dropped - * before returning. + * To optimise the insert operation, we add all items to a temporary list, then + * splice this list into the correct position in the AIL. + * + * Items that are already in the AIL are first deleted from their current + * location before being added to the temporary list. + * + * This avoids needing to do an insert operation on every item. + * + * The AIL lock is dropped by xfs_ail_update_finish() before returning to + * the caller. */ void xfs_trans_ail_update_bulk( diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 81c94dd1d596..d613a4094db6 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -807,7 +807,8 @@ xfs_zone_gc_write_chunk( { struct xfs_zone_gc_data *data = chunk->data; struct xfs_mount *mp = chunk->ip->i_mount; - unsigned int folio_offset = chunk->bio.bi_io_vec->bv_offset; + phys_addr_t bvec_paddr = + bvec_phys(bio_first_bvec_all(&chunk->bio)); struct xfs_gc_bio *split_chunk; if (chunk->bio.bi_status) @@ -822,7 +823,7 @@ xfs_zone_gc_write_chunk( bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE); bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len, - folio_offset); + offset_in_folio(chunk->scratch->folio, bvec_paddr)); while ((split_chunk = xfs_zone_gc_split_write(data, chunk))) xfs_zone_gc_submit_write(data, split_chunk); diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h index df120b4d1f83..eaf704d3d05e 100644 --- a/include/drm/drm_gpusvm.h +++ b/include/drm/drm_gpusvm.h @@ -89,6 +89,7 @@ struct drm_gpusvm_devmem_ops { * @ops: Pointer to the operations structure for GPU SVM device memory * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to. 
* @size: Size of device memory allocation + * @timeslice_expiration: Timeslice expiration in jiffies */ struct drm_gpusvm_devmem { struct device *dev; @@ -97,6 +98,7 @@ struct drm_gpusvm_devmem { const struct drm_gpusvm_devmem_ops *ops; struct drm_pagemap *dpagemap; size_t size; + u64 timeslice_expiration; }; /** @@ -186,6 +188,31 @@ struct drm_gpusvm_notifier { }; /** + * struct drm_gpusvm_range_flags - Structure representing a GPU SVM range flags + * + * @migrate_devmem: Flag indicating whether the range can be migrated to device memory + * @unmapped: Flag indicating if the range has been unmapped + * @partial_unmap: Flag indicating if the range has been partially unmapped + * @has_devmem_pages: Flag indicating if the range has devmem pages + * @has_dma_mapping: Flag indicating if the range has a DMA mapping + * @__flags: Flags for range in u16 form (used for READ_ONCE) + */ +struct drm_gpusvm_range_flags { + union { + struct { + /* All flags below must be set upon creation */ + u16 migrate_devmem : 1; + /* All flags below must be set / cleared under notifier lock */ + u16 unmapped : 1; + u16 partial_unmap : 1; + u16 has_devmem_pages : 1; + u16 has_dma_mapping : 1; + }; + u16 __flags; + }; +}; + +/** * struct drm_gpusvm_range - Structure representing a GPU SVM range * * @gpusvm: Pointer to the GPU SVM structure @@ -198,11 +225,6 @@ struct drm_gpusvm_notifier { * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping. * Note this is assuming only one drm_pagemap per range is allowed. * @flags: Flags for range - * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory - * @flags.unmapped: Flag indicating if the range has been unmapped - * @flags.partial_unmap: Flag indicating if the range has been partially unmapped - * @flags.has_devmem_pages: Flag indicating if the range has devmem pages - * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping * * This structure represents a GPU SVM range used for tracking memory ranges * mapped in a DRM device. @@ -216,15 +238,7 @@ struct drm_gpusvm_range { unsigned long notifier_seq; struct drm_pagemap_device_addr *dma_addr; struct drm_pagemap *dpagemap; - struct { - /* All flags below must be set upon creation */ - u16 migrate_devmem : 1; - /* All flags below must be set / cleared under notifier lock */ - u16 unmapped : 1; - u16 partial_unmap : 1; - u16 has_devmem_pages : 1; - u16 has_dma_mapping : 1; - } flags; + struct drm_gpusvm_range_flags flags; }; /** @@ -283,17 +297,22 @@ struct drm_gpusvm { * @check_pages_threshold: Check CPU pages for present if chunk is less than or * equal to threshold. If not present, reduce chunk * size. + * @timeslice_ms: The timeslice MS which in minimum time a piece of memory + * remains with either exclusive GPU or CPU access. * @in_notifier: entering from a MMU notifier * @read_only: operating on read-only memory * @devmem_possible: possible to use device memory + * @devmem_only: use only device memory * * Context that is DRM GPUSVM is operating in (i.e. user arguments). 
*/ struct drm_gpusvm_ctx { unsigned long check_pages_threshold; + unsigned long timeslice_ms; unsigned int in_notifier :1; unsigned int read_only :1; unsigned int devmem_possible :1; + unsigned int devmem_only :1; }; int drm_gpusvm_init(struct drm_gpusvm *gpusvm, diff --git a/include/linux/bio.h b/include/linux/bio.h index cafc7c215de8..b786ec5bcc81 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -11,6 +11,7 @@ #include <linux/uio.h> #define BIO_MAX_VECS 256U +#define BIO_MAX_INLINE_VECS UIO_MAXIOV struct queue_limits; diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 23492213ea35..492d23bec7be 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -38,6 +38,17 @@ enum can_termination_gpio { CAN_TERMINATION_GPIO_MAX, }; +struct data_bittiming_params { + const struct can_bittiming_const *data_bittiming_const; + struct can_bittiming data_bittiming; + const struct can_tdc_const *tdc_const; + struct can_tdc tdc; + const u32 *data_bitrate_const; + unsigned int data_bitrate_const_cnt; + int (*do_set_data_bittiming)(struct net_device *dev); + int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv); +}; + /* * CAN common private data */ @@ -45,16 +56,11 @@ struct can_priv { struct net_device *dev; struct can_device_stats can_stats; - const struct can_bittiming_const *bittiming_const, - *data_bittiming_const; - struct can_bittiming bittiming, data_bittiming; - const struct can_tdc_const *tdc_const; - struct can_tdc tdc; - + const struct can_bittiming_const *bittiming_const; + struct can_bittiming bittiming; + struct data_bittiming_params fd; unsigned int bitrate_const_cnt; const u32 *bitrate_const; - const u32 *data_bitrate_const; - unsigned int data_bitrate_const_cnt; u32 bitrate_max; struct can_clock clock; @@ -77,14 +83,12 @@ struct can_priv { struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); - int (*do_set_data_bittiming)(struct net_device *dev); int (*do_set_mode)(struct net_device *dev, enum can_mode mode); int (*do_set_termination)(struct net_device *dev, u16 term); int (*do_get_state)(const struct net_device *dev, enum can_state *state); int (*do_get_berr_counter)(const struct net_device *dev, struct can_berr_counter *bec); - int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv); }; static inline bool can_tdc_is_enabled(const struct can_priv *priv) @@ -114,11 +118,11 @@ static inline bool can_tdc_is_enabled(const struct can_priv *priv) */ static inline s32 can_get_relative_tdco(const struct can_priv *priv) { - const struct can_bittiming *dbt = &priv->data_bittiming; + const struct can_bittiming *dbt = &priv->fd.data_bittiming; s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg + dbt->phase_seg1) * dbt->brp; - return (s32)priv->tdc.tdco - sample_point_in_tc; + return (s32)priv->fd.tdc.tdco - sample_point_in_tc; } /* helper to define static CAN controller features at device creation time */ diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 69c2e8bb3782..7f7d0be8a0ac 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -76,29 +76,6 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) return crc32_le_shift(crc1, len2) ^ crc2; } -u32 crc32c_shift(u32 crc, size_t len); - -/** - * crc32c_combine - Combine two crc32c check values into one. For two sequences - * of bytes, seq1 and seq2 with lengths len1 and len2, crc32c() - * check values were calculated for each, crc1 and crc2. 
- * - * @crc1: crc32c of the first block - * @crc2: crc32c of the second block - * @len2: length of the second block - * - * Return: The crc32c() check value of seq1 and seq2 concatenated, requiring - * only crc1, crc2, and len2. Note: If seq_full denotes the concatenated - * memory area of seq1 with seq2, and crc_full the crc32c() value of - * seq_full, then crc_full == crc32c_combine(crc1, crc2, len2) when - * crc_full was seeded with the same initializer as crc1, and crc2 seed - * was 0. See also crc_combine_test(). - */ -static inline u32 crc32c_combine(u32 crc1, u32 crc2, size_t len2) -{ - return crc32c_shift(crc1, len2) ^ crc2; -} - #define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) /* diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 117718c24814..5e0dd333ad1f 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -19,6 +19,7 @@ #include <linux/netlink.h> #include <linux/timer_types.h> #include <uapi/linux/ethtool.h> +#include <uapi/linux/ethtool_netlink_generated.h> #include <uapi/linux/net_tstamp.h> #define ETHTOOL_MM_MAX_VERIFY_TIME_MS 128 @@ -830,6 +831,8 @@ struct ethtool_rxfh_param { * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags * @phc_index: device index of the associated PHC, or -1 if there is none * @phc_qualifier: qualifier of the associated PHC + * @phc_source: source device of the associated PHC + * @phc_phyindex: index of PHY device source of the associated PHC * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values */ @@ -838,6 +841,8 @@ struct kernel_ethtool_ts_info { u32 so_timestamping; int phc_index; enum hwtstamp_provider_qualifier phc_qualifier; + enum hwtstamp_source phc_source; + int phc_phyindex; enum hwtstamp_tx_types tx_types; enum hwtstamp_rx_filters rx_filters; }; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 17f917cb4540..420c7f9aa6ee 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2325,6 +2325,7 @@ struct ieee80211_eht_cap_elem { #define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04 #define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08 #define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30 +#define IEEE80211_EHT_OPER_MCS15_DISABLE 0x40 /** * struct ieee80211_eht_operation - eht operation element diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 58a2401e4b55..0075f6e5c3da 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -262,6 +262,11 @@ struct mr_table { int mroute_reg_vif_num; }; +static inline bool mr_can_free_table(struct net *net) +{ + return !check_net(net) || !net_initialized(net); +} + #ifdef CONFIG_IP_MROUTE_COMMON void vif_device_init(struct vif_device *v, struct net_device *dev, diff --git a/include/linux/net.h b/include/linux/net.h index 0ff950eecc6b..f8418d6e33e0 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -36,14 +36,13 @@ struct net; * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. * Eventually all flags will be in sk->sk_wq->flags. 
*/ -#define SOCKWQ_ASYNC_NOSPACE 0 -#define SOCKWQ_ASYNC_WAITDATA 1 -#define SOCK_NOSPACE 2 -#define SOCK_PASSCRED 3 -#define SOCK_PASSSEC 4 -#define SOCK_SUPPORT_ZC 5 -#define SOCK_CUSTOM_SOCKOPT 6 -#define SOCK_PASSPIDFD 7 +enum socket_flags { + SOCKWQ_ASYNC_NOSPACE, + SOCKWQ_ASYNC_WAITDATA, + SOCK_NOSPACE, + SOCK_SUPPORT_ZC, + SOCK_CUSTOM_SOCKOPT, +}; #ifndef ARCH_HAS_SOCKET_TYPES /** diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h index ff0758e88ea1..f4936d9c2b3c 100644 --- a/include/linux/net_tstamp.h +++ b/include/linux/net_tstamp.h @@ -4,6 +4,7 @@ #define _LINUX_NET_TIMESTAMPING_H_ #include <uapi/linux/net_tstamp.h> +#include <uapi/linux/ethtool_netlink_generated.h> #define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \ SOF_TIMESTAMPING_TX_SOFTWARE | \ @@ -13,12 +14,6 @@ SOF_TIMESTAMPING_TX_HARDWARE | \ SOF_TIMESTAMPING_RAW_HARDWARE) -enum hwtstamp_source { - HWTSTAMP_SOURCE_UNSPEC, - HWTSTAMP_SOURCE_NETDEV, - HWTSTAMP_SOURCE_PHYLIB, -}; - /** * struct hwtstamp_provider_desc - hwtstamp provider description * diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 73a97cf1bbce..ea9d335de130 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1013,9 +1013,13 @@ struct netdev_bpf { #ifdef CONFIG_XFRM_OFFLOAD struct xfrmdev_ops { - int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); - void (*xdo_dev_state_delete) (struct xfrm_state *x); - void (*xdo_dev_state_free) (struct xfrm_state *x); + int (*xdo_dev_state_add)(struct net_device *dev, + struct xfrm_state *x, + struct netlink_ext_ack *extack); + void (*xdo_dev_state_delete)(struct net_device *dev, + struct xfrm_state *x); + void (*xdo_dev_state_free)(struct net_device *dev, + struct xfrm_state *x); bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 71319637a84e..ee03f3cef30c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -213,6 +213,15 @@ struct nfs_server { char *fscache_uniq; /* Uniquifier (or NULL) */ #endif + /* The following #defines numerically match the NFSv4 equivalents */ +#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1) +#define NFS_FH_VOLATILE_ANY (0x2) +#define NFS_FH_VOL_MIGRATION (0x4) +#define NFS_FH_VOL_RENAME (0x8) +#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME) + u32 fh_expire_type; /* V4 bitmask representing file + handle volatility type for + this filesystem */ u32 pnfs_blksize; /* layout_blksize attr */ #if IS_ENABLED(CONFIG_NFS_V4) u32 attr_bitmask[3];/* V4 bitmask representing the set @@ -236,9 +245,6 @@ struct nfs_server { u32 acl_bitmask; /* V4 bitmask representing the ACEs that are supported on this filesystem */ - u32 fh_expire_type; /* V4 bitmask representing file - handle volatility type for - this filesystem */ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ struct rpc_wait_queue roc_rpcwaitq; void *pnfs_ld_data; /* per mount point data */ diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index c74077977830..8a7f4f802c57 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -188,6 +188,13 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page) return tag; } +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +{ + if (mem_alloc_profiling_enabled()) + return __pgalloc_tag_get(page); + return NULL; +} + void 
pgalloc_tag_split(struct folio *folio, int old_order, int new_order); void pgalloc_tag_swap(struct folio *new, struct folio *old); @@ -199,6 +206,7 @@ static inline void clear_page_tag_ref(struct page *page) {} static inline void alloc_tag_sec_init(void) {} static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {} +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 7c29d346d4b3..32b9da274115 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -990,7 +990,8 @@ struct phy_driver { * driver for the given phydev. If NULL, matching is based on * phy_id and phy_id_mask. */ - int (*match_phy_device)(struct phy_device *phydev); + int (*match_phy_device)(struct phy_device *phydev, + const struct phy_driver *phydrv); /** * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY @@ -1867,6 +1868,9 @@ char *phy_attached_info_irq(struct phy_device *phydev) __malloc; void phy_attached_info(struct phy_device *phydev); +int genphy_match_phy_device(struct phy_device *phydev, + const struct phy_driver *phydrv); + /* Clause 22 PHY */ int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); @@ -2033,9 +2037,6 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); -int __init mdio_bus_init(void); -void mdio_bus_exit(void); - int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); int phy_ethtool_get_sset_count(struct phy_device *phydev); int phy_ethtool_get_stats(struct phy_device *phydev, diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 3392c09b5d24..5399b9e41e35 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -17,25 +17,23 @@ struct net_device; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); -extern int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status); -extern struct phy_device *fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np); +int fixed_phy_add(int phy_id, const struct fixed_phy_status *status); +struct phy_device *fixed_phy_register(const struct fixed_phy_status *status, + struct device_node *np); extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); #else -static inline int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status) +static inline int fixed_phy_add(int phy_id, + const struct fixed_phy_status *status) { return -ENODEV; } -static inline struct phy_device *fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - struct device_node *np) +static inline struct phy_device * +fixed_phy_register(const struct fixed_phy_status *status, + struct device_node *np) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c7397b17bb08..5520524c93bf 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -274,7 +274,6 @@ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) -struct ahash_request; struct net_device; struct scatterlist; struct 
pipe_inode_info; @@ -4134,9 +4133,8 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, } int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, struct msghdr *msg); -int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, - struct iov_iter *to, int len, - struct ahash_request *hash); +int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len, u32 *crcp); int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); @@ -4192,17 +4190,9 @@ static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; } -struct skb_checksum_ops { - __wsum (*update)(const void *mem, int len, __wsum wsum); - __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); -}; - -extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly; - -__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, - __wsum csum, const struct skb_checksum_ops *ops); __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); +u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc); static inline void * __must_check __skb_header_pointer(const struct sk_buff *skb, int offset, int len, diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 493d9de4e472..dc6ebaee3d18 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -365,7 +365,7 @@ struct sdw_intel_res { * on e.g. which machine driver to select (I2S mode, HDaudio or * SoundWire). */ -int sdw_intel_acpi_scan(acpi_handle *parent_handle, +int sdw_intel_acpi_scan(acpi_handle parent_handle, struct sdw_intel_acpi_info *info); void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx); diff --git a/include/linux/uio.h b/include/linux/uio.h index 49ece9e1888f..393d0622cc28 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -99,7 +99,13 @@ static inline const struct iovec *iter_iov(const struct iov_iter *iter) } #define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset) -#define iter_iov_len(iter) (iter_iov(iter)->iov_len - (iter)->iov_offset) + +static inline size_t iter_iov_len(const struct iov_iter *i) +{ + if (i->iter_type == ITER_UBUF) + return i->count; + return iter_iov(i)->iov_len - i->iov_offset; +} static inline enum iter_type iov_iter_type(const struct iov_iter *i) { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index bbefde319f95..114299bd8b98 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -29,6 +29,7 @@ #include <linux/poll.h> #include <net/sock.h> #include <linux/seq_file.h> +#include <linux/ethtool.h> #define BT_SUBSYS_VERSION 2 #define BT_SUBSYS_REVISION 22 @@ -448,6 +449,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb); +int hci_ethtool_ts_info(unsigned int index, int sk_proto, + struct kernel_ethtool_ts_info *ts_info); + #define HCI_REQ_START BIT(0) #define HCI_REQ_SKB BIT(1) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 797992019f9e..82cbd54443ac 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -494,6 +494,7 @@ enum { #define HCI_EVENT_PKT 0x04 #define HCI_ISODATA_PKT 
0x05 #define HCI_DIAG_PKT 0xf0 +#define HCI_DRV_PKT 0xf1 #define HCI_VENDOR_PKT 0xff /* HCI packet types */ @@ -557,7 +558,8 @@ enum { #define ESCO_LINK 0x02 /* Low Energy links do not have defined link type. Use invented one */ #define LE_LINK 0x80 -#define ISO_LINK 0x82 +#define CIS_LINK 0x82 +#define BIS_LINK 0x83 #define INVALID_LINK 0xff /* LMP features */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 54bfeeaa0995..2b261e74e2c4 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -31,6 +31,7 @@ #include <linux/rculist.h> #include <net/bluetooth/hci.h> +#include <net/bluetooth/hci_drv.h> #include <net/bluetooth/hci_sync.h> #include <net/bluetooth/hci_sock.h> #include <net/bluetooth/coredump.h> @@ -613,6 +614,8 @@ struct hci_dev { struct list_head monitored_devices; bool advmon_pend_notify; + struct hci_drv *hci_drv; + #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif @@ -996,7 +999,8 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) case ESCO_LINK: h->sco_num++; break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: h->iso_num++; break; } @@ -1022,7 +1026,8 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) case ESCO_LINK: h->sco_num--; break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: h->iso_num--; break; } @@ -1039,7 +1044,8 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) case SCO_LINK: case ESCO_LINK: return h->sco_num; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: return h->iso_num; default: return 0; @@ -1100,7 +1106,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (bacmp(&c->dst, ba) || c->type != ISO_LINK) + if (bacmp(&c->dst, ba) || c->type != BIS_LINK) continue; if (c->iso_qos.bcast.bis == bis) { @@ -1122,7 +1128,7 @@ hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev) rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (c->type != BIS_LINK) continue; if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) @@ -1148,8 +1154,8 @@ hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (bacmp(&c->dst, ba) || c->type != ISO_LINK || - !test_bit(HCI_CONN_PER_ADV, &c->flags)) + if (bacmp(&c->dst, ba) || c->type != BIS_LINK || + !test_bit(HCI_CONN_PER_ADV, &c->flags)) continue; if (c->iso_qos.bcast.big == big && @@ -1238,7 +1244,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) + if (c->type != CIS_LINK) continue; /* Match CIG ID if set */ @@ -1270,7 +1276,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) + if (c->type != CIS_LINK) continue; if (handle == c->iso_qos.ucast.cig) { @@ -1293,17 +1299,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) - continue; - - /* An ISO_LINK hcon with BDADDR_ANY as destination - * address is a Broadcast connection. 
A Broadcast - * slave connection is associated with a PA train, - * so the sync_handle can be used to differentiate - * from unicast. - */ - if (bacmp(&c->dst, BDADDR_ANY) && - c->sync_handle == HCI_SYNC_HANDLE_INVALID) + if (c->type != BIS_LINK) continue; if (handle == c->iso_qos.bcast.big) { @@ -1327,7 +1323,7 @@ hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (c->type != BIS_LINK) continue; if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { @@ -1350,8 +1346,8 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK || - c->state != state) + if (c->type != BIS_LINK || bacmp(&c->dst, BDADDR_ANY) || + c->state != state) continue; if (handle == c->iso_qos.bcast.big) { @@ -1374,8 +1370,8 @@ hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK || - !test_bit(HCI_CONN_PA_SYNC, &c->flags)) + if (c->type != BIS_LINK || + !test_bit(HCI_CONN_PA_SYNC, &c->flags)) continue; if (c->iso_qos.bcast.big == big) { @@ -1397,7 +1393,7 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK) + if (c->type != BIS_LINK) continue; /* Ignore the listen hcon, we are looking @@ -2012,7 +2008,8 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, case ESCO_LINK: return sco_connect_ind(hdev, bdaddr, flags); - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: return iso_connect_ind(hdev, bdaddr, flags); default: diff --git a/include/net/bluetooth/hci_drv.h b/include/net/bluetooth/hci_drv.h new file mode 100644 index 000000000000..2f01c44f05ec --- /dev/null +++ b/include/net/bluetooth/hci_drv.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Google Corporation + */ + +#ifndef __HCI_DRV_H +#define __HCI_DRV_H + +#include <linux/types.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci.h> + +struct hci_drv_cmd_hdr { + __le16 opcode; + __le16 len; +} __packed; + +struct hci_drv_ev_hdr { + __le16 opcode; + __le16 len; +} __packed; + +#define HCI_DRV_EV_CMD_STATUS 0x0000 +struct hci_drv_ev_cmd_status { + __le16 opcode; + __u8 status; +} __packed; + +#define HCI_DRV_EV_CMD_COMPLETE 0x0001 +struct hci_drv_ev_cmd_complete { + __le16 opcode; + __u8 status; + __u8 data[]; +} __packed; + +#define HCI_DRV_STATUS_SUCCESS 0x00 +#define HCI_DRV_STATUS_UNSPECIFIED_ERROR 0x01 +#define HCI_DRV_STATUS_UNKNOWN_COMMAND 0x02 +#define HCI_DRV_STATUS_INVALID_PARAMETERS 0x03 + +#define HCI_DRV_MAX_DRIVER_NAME_LENGTH 32 + +/* Common commands that make sense on all drivers start from 0x0000 */ +#define HCI_DRV_OP_READ_INFO 0x0000 +#define HCI_DRV_READ_INFO_SIZE 0 +struct hci_drv_rp_read_info { + __u8 driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH]; + __le16 num_supported_commands; + __le16 supported_commands[]; +} __packed; + +/* Driver specific OGF (Opcode Group Field) + * Commands in this group may have different meanings across different drivers. 
+ */ +#define HCI_DRV_OGF_DRIVER_SPECIFIC 0x01 + +int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status); +int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp, + size_t rp_len); +int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *cmd_skb); + +struct hci_drv_handler { + int (*func)(struct hci_dev *hdev, void *data, u16 data_len); + size_t data_len; +}; + +struct hci_drv { + size_t common_handler_count; + const struct hci_drv_handler *common_handlers; + + size_t specific_handler_count; + const struct hci_drv_handler *specific_handlers; +}; + +#endif /* __HCI_DRV_H */ diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h index 082f89531b88..bbd752494ef9 100644 --- a/include/net/bluetooth/hci_mon.h +++ b/include/net/bluetooth/hci_mon.h @@ -51,6 +51,8 @@ struct hci_mon_hdr { #define HCI_MON_CTRL_EVENT 17 #define HCI_MON_ISO_TX_PKT 18 #define HCI_MON_ISO_RX_PKT 19 +#define HCI_MON_DRV_TX_PKT 20 +#define HCI_MON_DRV_RX_PKT 21 struct hci_mon_new_index { __u8 type; diff --git a/include/net/checksum.h b/include/net/checksum.h index 243f972267b8..e57986b173f8 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -99,12 +99,6 @@ csum_block_add(__wsum csum, __wsum csum2, int offset) } static __always_inline __wsum -csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) -{ - return csum_block_add(csum, csum2, offset); -} - -static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); @@ -115,12 +109,6 @@ static __always_inline __wsum csum_unfold(__sum16 n) return (__force __wsum)n; } -static __always_inline -__wsum csum_partial_ext(const void *buff, int len, __wsum sum) -{ - return csum_partial(buff, len, sum); -} - #define CSUM_MANGLED_0 ((__force __sum16)0xffff) static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index bea77934a235..bcf9d7467e1a 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -62,6 +62,7 @@ FN(NEIGH_FAILED) \ FN(NEIGH_QUEUEFULL) \ FN(NEIGH_DEAD) \ + FN(NEIGH_HH_FILLFAIL) \ FN(TC_EGRESS) \ FN(SECURITY_HOOK) \ FN(QDISC_DROP) \ @@ -348,6 +349,8 @@ enum skb_drop_reason { SKB_DROP_REASON_NEIGH_QUEUEFULL, /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */ SKB_DROP_REASON_NEIGH_DEAD, + /** @SKB_DROP_REASON_NEIGH_HH_FILLFAIL: failed to fill the device hard header */ + SKB_DROP_REASON_NEIGH_HH_FILLFAIL, /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */ SKB_DROP_REASON_TC_EGRESS, /** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */ diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 39cd50300a18..c306ebe379a0 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -116,11 +116,9 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_valid_encap_type(u16 encap_type, - struct netlink_ext_ack *extack, - bool rtnl_is_held); + struct netlink_ext_ack *extack); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, - struct netlink_ext_ack *extack, - bool rtnl_is_held); + struct netlink_ext_ack *extack); int lwtunnel_build_state(struct net *net, u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, @@ -203,15 +201,14 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, } static inline 
int lwtunnel_valid_encap_type(u16 encap_type, - struct netlink_ext_ack *extack, - bool rtnl_is_held) + struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel"); return -EOPNOTSUPP; } + static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, - struct netlink_ext_ack *extack, - bool rtnl_is_held) + struct netlink_ext_ack *extack) { /* return 0 since we are not walking attr looking for * RTA_ENCAP_TYPE attribute on nexthops. diff --git a/include/net/mac80211.h b/include/net/mac80211.h index fdafc37d17cc..82617579d910 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -744,6 +744,7 @@ struct ieee80211_parsed_tpe { * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the * reception of an EHT TB PPDU on an RU that spans the entire PPDU * bandwidth + * @eht_disable_mcs15: disable EHT-MCS 15 reception capability. * @bss_param_ch_cnt: in BSS-mode, the BSS params change count. This * information is the latest known value. It can come from this link's * beacon or from a beacon sent by another link. @@ -852,6 +853,8 @@ struct ieee80211_bss_conf { bool eht_su_beamformee; bool eht_mu_beamformer; bool eht_80mhz_full_bw_ul_mumimo; + bool eht_disable_mcs15; + u8 bss_param_ch_cnt; u8 bss_param_ch_cnt_link_id; }; diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h index 2a753813f849..3d3aef80beac 100644 --- a/include/net/netdev_lock.h +++ b/include/net/netdev_lock.h @@ -99,16 +99,15 @@ static inline void netdev_unlock_ops_compat(struct net_device *dev) static inline int netdev_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b) { - /* Only lower devices currently grab the instance lock, so no - * real ordering issues can occur. In the near future, only - * hardware devices will grab instance lock which also does not - * involve any ordering. Suppress lockdep ordering warnings - * until (if) we start grabbing instance lock on pure SW - * devices (bond/team/veth/etc). - */ if (a == b) return 0; - return -1; + + /* Allow locking multiple devices only under rtnl_lock, + * the exact order doesn't matter. + * Note that upper devices don't lock their ops, so nesting + * mostly happens in batched device removal for now. + */ + return lockdep_rtnl_is_held() ? 
-1 : 1; } #define netdev_lockdep_set_classes(dev) \ @@ -130,6 +129,9 @@ static inline int netdev_lock_cmp_fn(const struct lockdep_map *a, &qdisc_xmit_lock_key); \ } +#define netdev_lock_dereference(p, dev) \ + rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock)) + int netdev_debug_event(struct notifier_block *nb, unsigned long event, void *ptr); diff --git a/include/net/scm.h b/include/net/scm.h index 22bb49589fde..84c4707e78a5 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -102,123 +102,10 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, return __scm_send(sock, msg, scm); } -#ifdef CONFIG_SECURITY_NETWORK -static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) -{ - struct lsm_context ctx; - int err; - - if (test_bit(SOCK_PASSSEC, &sock->flags)) { - err = security_secid_to_secctx(scm->secid, &ctx); - - if (err >= 0) { - put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, ctx.len, - ctx.context); - security_release_secctx(&ctx); - } - } -} - -static inline bool scm_has_secdata(struct socket *sock) -{ - return test_bit(SOCK_PASSSEC, &sock->flags); -} -#else -static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) -{ } - -static inline bool scm_has_secdata(struct socket *sock) -{ - return false; -} -#endif /* CONFIG_SECURITY_NETWORK */ - -static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm) -{ - struct file *pidfd_file = NULL; - int len, pidfd; - - /* put_cmsg() doesn't return an error if CMSG is truncated, - * that's why we need to opencode these checks here. - */ - if (msg->msg_flags & MSG_CMSG_COMPAT) - len = sizeof(struct compat_cmsghdr) + sizeof(int); - else - len = sizeof(struct cmsghdr) + sizeof(int); - - if (msg->msg_controllen < len) { - msg->msg_flags |= MSG_CTRUNC; - return; - } - - if (!scm->pid) - return; - - pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file); - - if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) { - if (pidfd_file) { - put_unused_fd(pidfd); - fput(pidfd_file); - } - - return; - } - - if (pidfd_file) - fd_install(pidfd, pidfd_file); -} - -static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm, int flags) -{ - if (!msg->msg_control) { - if (test_bit(SOCK_PASSCRED, &sock->flags) || - test_bit(SOCK_PASSPIDFD, &sock->flags) || - scm->fp || scm_has_secdata(sock)) - msg->msg_flags |= MSG_CTRUNC; - scm_destroy(scm); - return false; - } - - if (test_bit(SOCK_PASSCRED, &sock->flags)) { - struct user_namespace *current_ns = current_user_ns(); - struct ucred ucreds = { - .pid = scm->creds.pid, - .uid = from_kuid_munged(current_ns, scm->creds.uid), - .gid = from_kgid_munged(current_ns, scm->creds.gid), - }; - put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); - } - - scm_passec(sock, msg, scm); - - if (scm->fp) - scm_detach_fds(msg, scm); - - return true; -} - -static inline void scm_recv(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm, int flags) -{ - if (!__scm_recv_common(sock, msg, scm, flags)) - return; - - scm_destroy_cred(scm); -} - -static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm, int flags) -{ - if (!__scm_recv_common(sock, msg, scm, flags)) - return; - - if (test_bit(SOCK_PASSPIDFD, &sock->flags)) - scm_pidfd_recv(msg, scm); - - scm_destroy_cred(scm); -} +void scm_recv(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags); +void scm_recv_unix(struct socket 
*sock, struct msghdr *msg, + struct scm_cookie *scm, int flags); static inline int scm_recv_one_fd(struct file *f, int __user *ufd, unsigned int flags) diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 291465c25810..654d37ec0402 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -15,8 +15,6 @@ * Dinakaran Joseph * Jon Grimm <jgrimm@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> - * - * Rewritten to use libcrc32c by: * Vlad Yasevich <vladislav.yasevich@hp.com> */ @@ -25,39 +23,18 @@ #include <linux/types.h> #include <linux/sctp.h> -#include <linux/crc32c.h> -#include <linux/crc32.h> - -static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum) -{ - return (__force __wsum)crc32c((__force __u32)sum, buff, len); -} - -static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, - int offset, int len) -{ - return (__force __wsum)crc32c_combine((__force __u32)csum, - (__force __u32)csum2, len); -} - -static const struct skb_checksum_ops sctp_csum_ops = { - .update = sctp_csum_update, - .combine = sctp_csum_combine, -}; static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, unsigned int offset) { struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); __le32 old = sh->checksum; - __wsum new; + u32 new; sh->checksum = 0; - new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, - &sctp_csum_ops); + new = ~skb_crc32c(skb, offset, skb->len - offset, ~0); sh->checksum = old; - - return cpu_to_le32((__force __u32)new); + return cpu_to_le32(new); } #endif /* __sctp_checksum_h__ */ diff --git a/include/net/sock.h b/include/net/sock.h index 3e15d7105ad2..92e7c1aae3cc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -337,6 +337,12 @@ struct sk_filter; * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME * @sk_txtime_report_errors: set report errors mode for SO_TXTIME * @sk_txtime_unused: unused txtime flags + * @sk_scm_recv_flags: all flags used by scm_recv() + * @sk_scm_credentials: flagged by SO_PASSCRED to recv SCM_CREDENTIALS + * @sk_scm_security: flagged by SO_PASSSEC to recv SCM_SECURITY + * @sk_scm_pidfd: flagged by SO_PASSPIDFD to recv SCM_PIDFD + * @sk_scm_rights: flagged by SO_PASSRIGHTS to recv SCM_RIGHTS + * @sk_scm_unused: unused flags for scm_recv() * @ns_tracker: tracker for netns reference * @sk_user_frags: xarray of pages the user is holding a reference on. 
* @sk_owner: reference to the real owner of the socket that calls @@ -523,7 +529,17 @@ struct sock { #endif int sk_disconnects; - u8 sk_txrehash; + union { + u8 sk_txrehash; + u8 sk_scm_recv_flags; + struct { + u8 sk_scm_credentials : 1, + sk_scm_security : 1, + sk_scm_pidfd : 1, + sk_scm_rights : 1, + sk_scm_unused : 4; + }; + }; u8 sk_clockid; u8 sk_txtime_deadline_mode : 1, sk_txtime_report_errors : 1, @@ -2773,9 +2789,14 @@ static inline bool sk_is_udp(const struct sock *sk) sk->sk_protocol == IPPROTO_UDP; } +static inline bool sk_is_unix(const struct sock *sk) +{ + return sk->sk_family == AF_UNIX; +} + static inline bool sk_is_stream_unix(const struct sock *sk) { - return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; + return sk_is_unix(sk) && sk->sk_type == SOCK_STREAM; } static inline bool sk_is_vsock(const struct sock *sk) @@ -2783,6 +2804,13 @@ static inline bool sk_is_vsock(const struct sock *sk) return sk->sk_family == AF_VSOCK; } +static inline bool sk_may_scm_recv(const struct sock *sk) +{ + return (IS_ENABLED(CONFIG_UNIX) && sk->sk_family == AF_UNIX) || + sk->sk_family == AF_NETLINK || + (IS_ENABLED(CONFIG_BT) && sk->sk_family == AF_BLUETOOTH); +} + /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from @@ -2822,6 +2850,12 @@ sk_is_refcounted(struct sock *sk) return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); } +static inline bool +sk_requests_wifi_status(struct sock *sk) +{ + return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS); +} + /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. * Check decrypted mark in case skb_orphan() cleared socket. diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 39365fd2ea17..a21e276dbe44 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -147,8 +147,19 @@ enum { }; struct xfrm_dev_offload { + /* The device for this offload. + * Device drivers should not use this directly, as that will prevent + * them from working with bonding device. Instead, the device passed + * to the add/delete callbacks should be used. + */ struct net_device *dev; netdevice_tracker dev_tracker; + /* This is a private pointer used by the bonding driver (and eventually + * should be moved there). Device drivers should not use it. + * Protected by xfrm_state.lock AND bond.ipsec_lock in most cases, + * except in the .xdo_dev_state_del() flow, where only xfrm_state.lock + * is held. 
+ */ struct net_device *real_dev; unsigned long offload_handle; u8 dir : 2; @@ -236,7 +247,6 @@ struct xfrm_state { /* Data for encapsulator */ struct xfrm_encap_tmpl *encap; - struct sock __rcu *encap_sk; /* NAT keepalive */ u32 nat_keepalive_interval; /* seconds */ @@ -1893,12 +1903,16 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n u32 if_id); struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, struct xfrm_migrate *m, - struct xfrm_encap_tmpl *encap); + struct xfrm_encap_tmpl *encap, + struct net *net, + struct xfrm_user_offload *xuo, + struct netlink_ext_ack *extack); int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k, struct net *net, struct xfrm_encap_tmpl *encap, u32 if_id, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + struct xfrm_user_offload *xuo); #endif int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 8becb4504887..8582d22f3818 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1404,6 +1404,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h index 72f60ddfea75..9556b4755a1e 100644 --- a/include/sound/ump_msg.h +++ b/include/sound/ump_msg.h @@ -604,7 +604,7 @@ struct snd_ump_stream_msg_ep_info { } __packed; /* UMP Stream Message: Device Info Notification (128bit) */ -struct snd_ump_stream_msg_devince_info { +struct snd_ump_stream_msg_device_info { #ifdef __BIG_ENDIAN_BITFIELD /* 0 */ u32 type:4; @@ -754,7 +754,7 @@ struct snd_ump_stream_msg_fb_name { union snd_ump_stream_msg { struct snd_ump_stream_msg_ep_discovery ep_discovery; struct snd_ump_stream_msg_ep_info ep_info; - struct snd_ump_stream_msg_devince_info device_info; + struct snd_ump_stream_msg_device_info device_info; struct snd_ump_stream_msg_stream_cfg stream_cfg; struct snd_ump_stream_msg_fb_discovery fb_discovery; struct snd_ump_stream_msg_fb_info fb_info; diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index aa5016ff3d91..f333a0ac4ee4 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -145,6 +145,8 @@ #define SO_RCVPRIORITY 82 +#define SO_PASSRIGHTS 83 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h index 30c8dad6214e..9a02f579de22 100644 --- a/include/uapi/linux/ethtool_netlink_generated.h +++ b/include/uapi/linux/ethtool_netlink_generated.h @@ -37,6 +37,18 @@ enum ethtool_tcp_data_split { ETHTOOL_TCP_DATA_SPLIT_ENABLED, }; +/** + * enum hwtstamp_source - Source of the hardware timestamp + * @HWTSTAMP_SOURCE_NETDEV: Hardware timestamp comes from a MAC or a device + * which has MAC and PHY integrated + * @HWTSTAMP_SOURCE_PHYLIB: Hardware timestamp comes from one PHY device of the + * network topology + */ +enum hwtstamp_source { + HWTSTAMP_SOURCE_NETDEV = 1, + HWTSTAMP_SOURCE_PHYLIB, +}; + enum { ETHTOOL_A_HEADER_UNSPEC, ETHTOOL_A_HEADER_DEV_INDEX, @@ -401,6 +413,8 @@ enum { ETHTOOL_A_TSINFO_PHC_INDEX, 
ETHTOOL_A_TSINFO_STATS, ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER, + ETHTOOL_A_TSINFO_HWTSTAMP_SOURCE, + ETHTOOL_A_TSINFO_HWTSTAMP_PHYINDEX, __ETHTOOL_A_TSINFO_CNT, ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 5e67a7eaf4a7..b851c36ad25d 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __LINUX_NEIGHBOUR_H -#define __LINUX_NEIGHBOUR_H +#ifndef _UAPI__LINUX_NEIGHBOUR_H +#define _UAPI__LINUX_NEIGHBOUR_H #include <linux/types.h> #include <linux/netlink.h> diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index 9414ca6d101c..e0d6a59a89fa 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -86,13 +86,8 @@ static inline void napi_show_fdinfo(struct io_ring_ctx *ctx, } #endif -/* - * Caller holds a reference to the file already, we don't need to do - * anything else to get an extra reference. - */ -__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) +static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) { - struct io_ring_ctx *ctx = file->private_data; struct io_overflow_cqe *ocqe; struct io_rings *r = ctx->rings; struct rusage sq_usage; @@ -106,7 +101,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) unsigned int sq_entries, cq_entries; int sq_pid = -1, sq_cpu = -1; u64 sq_total_time = 0, sq_work_time = 0; - bool has_lock; unsigned int i; if (ctx->flags & IORING_SETUP_CQE32) @@ -176,15 +170,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) seq_printf(m, "\n"); } - /* - * Avoid ABBA deadlock between the seq lock and the io_uring mutex, - * since fdinfo case grabs it in the opposite direction of normal use - * cases. If we fail to get the lock, we just don't iterate any - * structures that could be going away outside the io_uring mutex. 
- */ - has_lock = mutex_trylock(&ctx->uring_lock); - - if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { + if (ctx->flags & IORING_SETUP_SQPOLL) { struct io_sq_data *sq = ctx->sq_data; /* @@ -206,7 +192,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time); seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time); seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr); - for (i = 0; has_lock && i < ctx->file_table.data.nr; i++) { + for (i = 0; i < ctx->file_table.data.nr; i++) { struct file *f = NULL; if (ctx->file_table.data.nodes[i]) @@ -218,7 +204,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) } } seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr); - for (i = 0; has_lock && i < ctx->buf_table.nr; i++) { + for (i = 0; i < ctx->buf_table.nr; i++) { struct io_mapped_ubuf *buf = NULL; if (ctx->buf_table.nodes[i]) @@ -228,7 +214,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) else seq_printf(m, "%5u: <none>\n", i); } - if (has_lock && !xa_empty(&ctx->personalities)) { + if (!xa_empty(&ctx->personalities)) { unsigned long index; const struct cred *cred; @@ -238,7 +224,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) } seq_puts(m, "PollList:\n"); - for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) { + for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) { struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; struct io_kiocb *req; @@ -247,9 +233,6 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) task_work_pending(req->tctx->task)); } - if (has_lock) - mutex_unlock(&ctx->uring_lock); - seq_puts(m, "CqOverflowList:\n"); spin_lock(&ctx->completion_lock); list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) { @@ -262,4 +245,23 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) spin_unlock(&ctx->completion_lock); napi_show_fdinfo(ctx, m); } + +/* + * Caller holds a reference to the file already, we don't need to do + * anything else to get an extra reference. + */ +__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) +{ + struct io_ring_ctx *ctx = file->private_data; + + /* + * Avoid ABBA deadlock between the seq lock and the io_uring mutex, + * since fdinfo case grabs it in the opposite direction of normal use + * cases. 
+ */ + if (mutex_trylock(&ctx->uring_lock)) { + __io_uring_show_fdinfo(ctx, m); + mutex_unlock(&ctx->uring_lock); + } +} #endif diff --git a/io_uring/memmap.c b/io_uring/memmap.c index 76fcc79656b0..07f8a5cbd37e 100644 --- a/io_uring/memmap.c +++ b/io_uring/memmap.c @@ -116,7 +116,7 @@ static int io_region_init_ptr(struct io_mapped_region *mr) void *ptr; if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) { - if (ifd.nr_folios == 1) { + if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) { mr->ptr = page_address(mr->pages[0]); return 0; } diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index a9ea7d29cdd9..430ed620ddfe 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -254,6 +254,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) return -EOPNOTSUPP; issue_flags |= IO_URING_F_IOPOLL; req->iopoll_completed = 0; + if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) { + /* make sure every req only blocks once */ + req->flags &= ~REQ_F_IOPOLL_STATE; + req->iopoll_start = ktime_get_ns(); + } } ret = file->f_op->uring_cmd(ioucmd, issue_flags); diff --git a/kernel/fork.c b/kernel/fork.c index c4b26cd8998b..168681fc4b25 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -498,10 +498,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) vma_numab_state_init(new); dup_anon_vma_name(orig, new); - /* track_pfn_copy() will later take care of copying internal state. */ - if (unlikely(new->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(new); - return new; } @@ -672,6 +668,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, tmp = vm_area_dup(mpnt); if (!tmp) goto fail_nomem; + + /* track_pfn_copy() will later take care of copying internal state. */ + if (unlikely(tmp->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(tmp); + retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; diff --git a/kernel/padata.c b/kernel/padata.c index b3d4eacc4f5d..7eee94166357 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -358,7 +358,8 @@ static void padata_reorder(struct parallel_data *pd) * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish. 
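The io_uring fdinfo refactor above hoists the locking out of the dump body: instead of threading a has_lock flag through every loop, the wrapper takes ctx->uring_lock with mutex_trylock() and skips the whole dump if it cannot, which avoids the ABBA deadlock against the seq_file lock. Below is a hedged userspace analogue of that trylock-wrapper shape using pthreads; all names are invented for the sketch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_user_files = 3;                   /* stand-in for ctx state */

/* The body runs only with the lock held, so no has_lock flag is needed. */
static void show_fdinfo_locked(FILE *m)
{
        for (int i = 0; i < nr_user_files; i++)
                fprintf(m, "UserFile %d\n", i);
}

/* Wrapper: take the lock opportunistically; if that fails, print nothing
 * rather than block and risk an ABBA deadlock with the seq_file lock. */
static void show_fdinfo(FILE *m)
{
        if (pthread_mutex_trylock(&ring_lock) == 0) {
                show_fdinfo_locked(m);
                pthread_mutex_unlock(&ring_lock);
        }
}

int main(void)
{
        show_fdinfo(stdout);
        return 0;
}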
*/ padata_get_pd(pd); - queue_work(pinst->serial_wq, &pd->reorder_work); + if (!queue_work(pinst->serial_wq, &pd->reorder_work)) + padata_put_pd(pd); } } diff --git a/lib/crc32.c b/lib/crc32.c index fddd424ff224..ade48bbb0083 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -119,12 +119,6 @@ u32 crc32_le_shift(u32 crc, size_t len) } EXPORT_SYMBOL(crc32_le_shift); -u32 crc32c_shift(u32 crc, size_t len) -{ - return crc32_generic_shift(crc, len, CRC32C_POLY_LE); -} -EXPORT_SYMBOL(crc32c_shift); - u32 crc32_be_base(u32 crc, const u8 *p, size_t len) { while (len--) diff --git a/lib/tests/crc_kunit.c b/lib/tests/crc_kunit.c index 585c48b65cef..064c2d581557 100644 --- a/lib/tests/crc_kunit.c +++ b/lib/tests/crc_kunit.c @@ -391,17 +391,11 @@ static u64 crc32c_wrapper(u64 crc, const u8 *p, size_t len) return crc32c(crc, p, len); } -static u64 crc32c_combine_wrapper(u64 crc1, u64 crc2, size_t len2) -{ - return crc32c_combine(crc1, crc2, len2); -} - static const struct crc_variant crc_variant_crc32c = { .bits = 32, .le = true, .poly = 0x82f63b78, .func = crc32c_wrapper, - .combine_func = crc32c_combine_wrapper, }; static void crc32c_test(struct kunit *test) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6ea1be71aa42..7ae38bfb9096 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3010,7 +3010,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; - long retval, gbl_chg; + long retval, gbl_chg, gbl_reserve; map_chg_state map_chg; int ret, idx; struct hugetlb_cgroup *h_cg = NULL; @@ -3163,8 +3163,16 @@ out_uncharge_cgroup_reservation: hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (map_chg) - hugepage_subpool_put_pages(spool, 1); + /* + * put page to subpool iff the quota of subpool's rsv_hpages is used + * during hugepage_subpool_get_pages. + */ + if (map_chg && !gbl_chg) { + gbl_reserve = hugepage_subpool_put_pages(spool, 1); + hugetlb_acct_memory(h, -gbl_reserve); + } + + out_end_reservation: if (map_chg != MAP_CHG_ENFORCED) vma_end_reservation(h, vma, addr); @@ -7239,7 +7247,7 @@ bool hugetlb_reserve_pages(struct inode *inode, struct vm_area_struct *vma, vm_flags_t vm_flags) { - long chg = -1, add = -1; + long chg = -1, add = -1, spool_resv, gbl_resv; struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map; @@ -7374,8 +7382,16 @@ bool hugetlb_reserve_pages(struct inode *inode, return true; out_put_pages: - /* put back original number of pages, chg */ - (void)hugepage_subpool_put_pages(spool, chg); + spool_resv = chg - gbl_reserve; + if (spool_resv) { + /* put sub pool's reservation back, chg - gbl_reserve */ + gbl_resv = hugepage_subpool_put_pages(spool, spool_resv); + /* + * subpool's reserved pages can not be put back due to race, + * return to hstate. 
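The padata_reorder() fix in the hunk above follows a common kernel pattern: take a reference for the work you are about to queue, and give it straight back if queue_work() reports the item was already pending (it returns false in that case, and the pending execution only accounts for the reference taken when it was first queued). A small userspace sketch of that bump-then-undo pattern, with hypothetical names, is below.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pd {
        atomic_int refcnt;
        atomic_bool work_pending;       /* stands in for the reorder work item */
};

static void pd_get(struct pd *pd) { atomic_fetch_add(&pd->refcnt, 1); }
static void pd_put(struct pd *pd) { atomic_fetch_sub(&pd->refcnt, 1); }

/* Mirrors queue_work(): returns true only if the work was not already pending. */
static bool queue_reorder_work(struct pd *pd)
{
        return !atomic_exchange(&pd->work_pending, true);
}

static void reorder(struct pd *pd)
{
        pd_get(pd);                     /* reference the worker will drop later */
        if (!queue_reorder_work(pd))
                pd_put(pd);             /* already queued: undo our reference */
}

int main(void)
{
        struct pd pd;

        atomic_init(&pd.refcnt, 1);
        atomic_init(&pd.work_pending, false);
        reorder(&pd);                   /* queues the work, ref stays with it */
        reorder(&pd);                   /* already pending, extra ref dropped */
        printf("refcnt now %d\n", atomic_load(&pd.refcnt));
        return 0;
}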
+ */ + hugetlb_acct_memory(h, -gbl_resv); + } out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), chg * pages_per_huge_page(h), h_cg); diff --git a/mm/internal.h b/mm/internal.h index 25a29872c634..5c7a2b43ad76 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1590,7 +1590,6 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc); #ifdef CONFIG_UNACCEPTED_MEMORY void accept_page(struct page *page); -void unaccepted_cleanup_work(struct work_struct *work); #else /* CONFIG_UNACCEPTED_MEMORY */ static inline void accept_page(struct page *page) { diff --git a/mm/memory.c b/mm/memory.c index ba3ea0a82f7f..49199410805c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3751,7 +3751,7 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio, /* Stabilize the mapcount vs. refcount and recheck. */ folio_lock_large_mapcount(folio); - VM_WARN_ON_ONCE(folio_large_mapcount(folio) < folio_ref_count(folio)); + VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_ref_count(folio), folio); if (folio_test_large_maybe_mapped_shared(folio)) goto unlock; diff --git a/mm/mm_init.c b/mm/mm_init.c index 327764ca0ee4..eedce9321e13 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1441,7 +1441,6 @@ static void __meminit zone_init_free_lists(struct zone *zone) #ifdef CONFIG_UNACCEPTED_MEMORY INIT_LIST_HEAD(&zone->unaccepted_pages); - INIT_WORK(&zone->unaccepted_cleanup, unaccepted_cleanup_work); #endif } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2073c3fdf9c4..8c75433ff9a4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -290,7 +290,8 @@ EXPORT_SYMBOL(nr_online_nodes); #endif static bool page_contains_unaccepted(struct page *page, unsigned int order); -static bool cond_accept_memory(struct zone *zone, unsigned int order); +static bool cond_accept_memory(struct zone *zone, unsigned int order, + int alloc_flags); static bool __free_unaccepted(struct page *page); int page_group_by_mobility_disabled __read_mostly; @@ -1147,14 +1148,9 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) __pgalloc_tag_sub(page, nr); } -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) +/* When tag is not NULL, assuming mem_alloc_profiling_enabled */ +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) { - struct alloc_tag *tag; - - if (!mem_alloc_profiling_enabled()) - return; - - tag = __pgalloc_tag_get(page); if (tag) this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); } @@ -1164,7 +1160,7 @@ static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ @@ -3612,7 +3608,7 @@ retry: } } - cond_accept_memory(zone, order); + cond_accept_memory(zone, order, alloc_flags); /* * Detect whether the number of free pages is below high @@ -3639,7 +3635,7 @@ check_alloc_wmark: gfp_mask)) { int ret; - if (cond_accept_memory(zone, order)) + if (cond_accept_memory(zone, order, alloc_flags)) goto try_this_zone; /* @@ -3692,7 +3688,7 @@ try_this_zone: return page; } else { - if (cond_accept_memory(zone, order)) + if (cond_accept_memory(zone, order, alloc_flags)) goto try_this_zone; /* Try again if zone has deferred pages */ @@ 
-4845,7 +4841,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, goto failed; } - cond_accept_memory(zone, 0); + cond_accept_memory(zone, 0, alloc_flags); retry_this_zone: mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; if (zone_watermark_fast(zone, 0, mark, @@ -4854,7 +4850,7 @@ retry_this_zone: break; } - if (cond_accept_memory(zone, 0)) + if (cond_accept_memory(zone, 0, alloc_flags)) goto retry_this_zone; /* Try again if zone has deferred pages */ @@ -5061,11 +5057,13 @@ static void ___free_pages(struct page *page, unsigned int order, { /* get PageHead before we drop reference */ int head = PageHead(page); + /* get alloc tag in case the page is released by others */ + struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) __free_frozen_pages(page, order, fpi_flags); else if (!head) { - pgalloc_tag_sub_pages(page, (1 << order) - 1); + pgalloc_tag_sub_pages(tag, (1 << order) - 1); while (order-- > 0) __free_frozen_pages(page + (1 << order), order, fpi_flags); @@ -7170,16 +7168,8 @@ bool has_managed_dma(void) #ifdef CONFIG_UNACCEPTED_MEMORY -/* Counts number of zones with unaccepted pages. */ -static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); - static bool lazy_accept = true; -void unaccepted_cleanup_work(struct work_struct *work) -{ - static_branch_dec(&zones_with_unaccepted_pages); -} - static int __init accept_memory_parse(char *p) { if (!strcmp(p, "lazy")) { @@ -7204,11 +7194,7 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order) static void __accept_page(struct zone *zone, unsigned long *flags, struct page *page) { - bool last; - list_del(&page->lru); - last = list_empty(&zone->unaccepted_pages); - account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); __ClearPageUnaccepted(page); @@ -7217,28 +7203,6 @@ static void __accept_page(struct zone *zone, unsigned long *flags, accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); - - if (last) { - /* - * There are two corner cases: - * - * - If allocation occurs during the CPU bring up, - * static_branch_dec() cannot be used directly as - * it causes a deadlock on cpu_hotplug_lock. - * - * Instead, use schedule_work() to prevent deadlock. - * - * - If allocation occurs before workqueues are initialized, - * static_branch_dec() should be called directly. - * - * Workqueues are initialized before CPU bring up, so this - * will not conflict with the first scenario. 
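In the ___free_pages() hunk above, the allocation tag is read before put_page_testzero(): once our reference is dropped, another owner may free and reuse the page at any moment, so its fields can no longer be trusted and only the saved value is safe. The sketch below shows that ordering with a plain C11 refcount; it is a userspace analogue with invented names, not the mm code itself.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct page_like {
        atomic_int ref;
        int alloc_tag;                          /* stand-in for the pgalloc tag */
};

static void free_page_like(struct page_like *p)
{
        /* Capture anything still needed while our reference pins the object. */
        int tag = p->alloc_tag;

        if (atomic_fetch_sub(&p->ref, 1) == 1) {
                free(p);                        /* we held the last reference */
                return;
        }
        /* Another owner remains and may free it any time: use the copy only. */
        printf("accounting against tag %d without touching the page\n", tag);
}

int main(void)
{
        struct page_like *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        atomic_init(&p->ref, 2);                /* two owners */
        p->alloc_tag = 42;
        free_page_like(p);                      /* not last: uses the cached tag */
        free_page_like(p);                      /* last reference: frees it */
        return 0;
}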
- */ - if (system_wq) - schedule_work(&zone->unaccepted_cleanup); - else - unaccepted_cleanup_work(&zone->unaccepted_cleanup); - } } void accept_page(struct page *page) @@ -7275,20 +7239,17 @@ static bool try_to_accept_memory_one(struct zone *zone) return true; } -static inline bool has_unaccepted_memory(void) -{ - return static_branch_unlikely(&zones_with_unaccepted_pages); -} - -static bool cond_accept_memory(struct zone *zone, unsigned int order) +static bool cond_accept_memory(struct zone *zone, unsigned int order, + int alloc_flags) { long to_accept, wmark; bool ret = false; - if (!has_unaccepted_memory()) + if (list_empty(&zone->unaccepted_pages)) return false; - if (list_empty(&zone->unaccepted_pages)) + /* Bailout, since try_to_accept_memory_one() needs to take a lock */ + if (alloc_flags & ALLOC_TRYLOCK) return false; wmark = promo_wmark_pages(zone); @@ -7321,22 +7282,17 @@ static bool __free_unaccepted(struct page *page) { struct zone *zone = page_zone(page); unsigned long flags; - bool first = false; if (!lazy_accept) return false; spin_lock_irqsave(&zone->lock, flags); - first = list_empty(&zone->unaccepted_pages); list_add_tail(&page->lru, &zone->unaccepted_pages); account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); __SetPageUnaccepted(page); spin_unlock_irqrestore(&zone->lock, flags); - if (first) - static_branch_inc(&zones_with_unaccepted_pages); - return true; } @@ -7347,7 +7303,8 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order) return false; } -static bool cond_accept_memory(struct zone *zone, unsigned int order) +static bool cond_accept_memory(struct zone *zone, unsigned int order, + int alloc_flags) { return false; } @@ -7418,11 +7375,6 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order) if (!pcp_allowed_order(order)) return NULL; -#ifdef CONFIG_UNACCEPTED_MEMORY - /* Bailout, since try_to_accept_memory_one() needs to take a lock */ - if (has_unaccepted_memory()) - return NULL; -#endif /* Bailout, since _deferred_grow_zone() needs to take a lock */ if (deferred_pages_enabled()) return NULL; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 7d5d709cc838..e0db855c89b4 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1064,8 +1064,13 @@ static int move_present_pte(struct mm_struct *mm, src_folio->index = linear_page_index(dst_vma, dst_addr); orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot); - /* Follow mremap() behavior and treat the entry dirty after the move */ - orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma); + /* Set soft dirty bit so userspace can notice the pte was moved */ +#ifdef CONFIG_MEM_SOFT_DIRTY + orig_dst_pte = pte_mksoft_dirty(orig_dst_pte); +#endif + if (pte_dirty(orig_src_pte)) + orig_dst_pte = pte_mkdirty(orig_dst_pte); + orig_dst_pte = pte_mkwrite(orig_dst_pte, dst_vma); set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte); out: @@ -1100,6 +1105,9 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, } orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); +#ifdef CONFIG_MEM_SOFT_DIRTY + orig_src_pte = pte_swp_mksoft_dirty(orig_src_pte); +#endif set_pte_at(mm, dst_addr, dst_pte, orig_src_pte); double_pt_unlock(dst_ptl, src_ptl); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 961b270f023c..d14a7e317ac8 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1243,19 +1243,19 @@ void zs_obj_write(struct zs_pool *pool, unsigned long handle, class = zspage_class(pool, zspage); 
off = offset_in_page(class->size * obj_idx); - if (off + class->size <= PAGE_SIZE) { + if (!ZsHugePage(zspage)) + off += ZS_HANDLE_SIZE; + + if (off + mem_len <= PAGE_SIZE) { /* this object is contained entirely within a page */ void *dst = kmap_local_zpdesc(zpdesc); - if (!ZsHugePage(zspage)) - off += ZS_HANDLE_SIZE; memcpy(dst + off, handle_mem, mem_len); kunmap_local(dst); } else { /* this object spans two pages */ size_t sizes[2]; - off += ZS_HANDLE_SIZE; sizes[0] = PAGE_SIZE - off; sizes[1] = mem_len - sizes[0]; diff --git a/net/Kconfig b/net/Kconfig index 202cc595e5e6..5b71a52987d3 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -75,6 +75,10 @@ config NET_DEVMEM config NET_SHAPER bool +config NET_CRC32C + bool + select CRC32 + menu "Networking options" source "net/packet/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 5a3835b7dfcd..a7eede7616d8 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -14,7 +14,8 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o + ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o \ + hci_drv.o bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 0b4d0a8bd361..6ad2f72f53f4 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -34,6 +34,9 @@ #include <net/bluetooth/bluetooth.h> #include <linux/proc_fs.h> +#include <linux/ethtool.h> +#include <linux/sockios.h> + #include "leds.h" #include "selftest.h" @@ -563,6 +566,86 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock, } EXPORT_SYMBOL(bt_sock_poll); +static int bt_ethtool_get_ts_info(struct sock *sk, unsigned int index, + void __user *useraddr) +{ + struct ethtool_ts_info info; + struct kernel_ethtool_ts_info ts_info = {}; + int ret; + + ret = hci_ethtool_ts_info(index, sk->sk_protocol, &ts_info); + if (ret == -ENODEV) + return ret; + else if (ret < 0) + return -EIO; + + memset(&info, 0, sizeof(info)); + + info.cmd = ETHTOOL_GET_TS_INFO; + info.so_timestamping = ts_info.so_timestamping; + info.phc_index = ts_info.phc_index; + info.tx_types = ts_info.tx_types; + info.rx_filters = ts_info.rx_filters; + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + + return 0; +} + +static int bt_ethtool(struct sock *sk, const struct ifreq *ifr, + void __user *useraddr) +{ + unsigned int index; + u32 ethcmd; + int n; + + if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) + return -EFAULT; + + if (sscanf(ifr->ifr_name, "hci%u%n", &index, &n) != 1 || + n != strlen(ifr->ifr_name)) + return -ENODEV; + + switch (ethcmd) { + case ETHTOOL_GET_TS_INFO: + return bt_ethtool_get_ts_info(sk, index, useraddr); + } + + return -EOPNOTSUPP; +} + +static int bt_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) +{ + struct sock *sk = sock->sk; + struct ifreq ifr = {}; + void __user *data; + char *colon; + int ret = -ENOIOCTLCMD; + + if (get_user_ifreq(&ifr, &data, arg)) + return -EFAULT; + + ifr.ifr_name[IFNAMSIZ - 1] = 0; + colon = strchr(ifr.ifr_name, ':'); + if (colon) + *colon = 0; + + switch (cmd) { + case SIOCETHTOOL: + ret = bt_ethtool(sk, &ifr, data); + break; + } + + if (colon) + *colon = ':'; + + if (put_user_ifreq(&ifr, arg)) + return -EFAULT; + + return ret; +} + int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long 
arg) { struct sock *sk = sock->sk; @@ -595,6 +678,10 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) err = put_user(amount, (int __user *)arg); break; + case SIOCETHTOOL: + err = bt_dev_ioctl(sock, cmd, (void __user *)arg); + break; + default: err = -ENOIOCTLCMD; break; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 946d2ae551f8..99efeed6a766 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -785,7 +785,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c d->sync_handle = conn->sync_handle; if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) { - hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK, + hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK, HCI_CONN_PA_SYNC, d); if (!d->count) @@ -795,7 +795,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c } if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) { - hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK, + hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK, HCI_CONN_BIG_SYNC, d); if (!d->count) @@ -885,9 +885,11 @@ static void cis_cleanup(struct hci_conn *conn) /* Check if ISO connection is a CIS and remove CIG if there are * no other connections using it. */ - hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); - hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); - hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d); + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT, + &d); + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED, + &d); if (d.count) return; @@ -910,7 +912,8 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t if (!hdev->acl_mtu) return ERR_PTR(-ECONNREFUSED); break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: if (hdev->iso_mtu) /* Dedicated ISO Buffer exists */ break; @@ -974,7 +977,8 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t hci_copy_identity_address(hdev, &conn->src, &conn->src_type); conn->mtu = hdev->le_mtu ? 
hdev->le_mtu : hdev->acl_mtu; break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); @@ -1071,7 +1075,8 @@ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason) if (HCI_CONN_HANDLE_UNSET(conn->handle)) hci_conn_failed(conn, reason); break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: if ((conn->state != BT_CONNECTED && !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) || test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) @@ -1146,7 +1151,8 @@ void hci_conn_del(struct hci_conn *conn) hdev->acl_cnt += conn->sent; } else { /* Unacked ISO frames */ - if (conn->type == ISO_LINK) { + if (conn->type == CIS_LINK || + conn->type == BIS_LINK) { if (hdev->iso_pkts) hdev->iso_cnt += conn->sent; else if (hdev->le_pkts) @@ -1532,7 +1538,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, memcmp(conn->le_per_adv_data, base, base_len))) return ERR_PTR(-EADDRINUSE); - conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_MASTER); if (IS_ERR(conn)) return conn; @@ -1740,7 +1746,7 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) data.count = 0; /* Create a BIS for each bound connection */ - hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, + hci_conn_hash_list_state(hdev, bis_list, BIS_LINK, BT_BOUND, &data); cp.handle = qos->bcast.big; @@ -1829,12 +1835,12 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { data.count = 0; - hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT, &data); if (data.count) continue; - hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, + hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED, &data); if (!data.count) break; @@ -1884,7 +1890,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, qos->ucast.cis); if (!cis) { - cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + cis = hci_conn_add_unset(hdev, CIS_LINK, dst, + HCI_ROLE_MASTER); if (IS_ERR(cis)) return cis; cis->cleanup = cis_cleanup; @@ -1976,7 +1983,7 @@ bool hci_iso_setup_path(struct hci_conn *conn) int hci_conn_check_create_cis(struct hci_conn *conn) { - if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY)) + if (conn->type != CIS_LINK) return -EINVAL; if (!conn->parent || conn->parent->state != BT_CONNECTED || @@ -2070,7 +2077,9 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *conn; - conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE); + bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid); + + conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_SLAVE); if (IS_ERR(conn)) return conn; @@ -2219,7 +2228,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, * the start periodic advertising and create BIG commands have * been queued */ - hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK, + hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK, BT_BOUND, &data); /* Queue start periodic advertising and create BIG */ @@ -2951,7 +2960,8 @@ void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb) * TODO: SCO support without flowctl (needs to be done in drivers) */ switch (conn->type) { - case 
ISO_LINK: + case CIS_LINK: + case BIS_LINK: case ACL_LINK: case LE_LINK: break; @@ -3047,3 +3057,36 @@ u8 *hci_conn_key_enc_size(struct hci_conn *conn) return NULL; } + +int hci_ethtool_ts_info(unsigned int index, int sk_proto, + struct kernel_ethtool_ts_info *info) +{ + struct hci_dev *hdev; + + hdev = hci_dev_get(index); + if (!hdev) + return -ENODEV; + + info->so_timestamping = + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + + switch (sk_proto) { + case BTPROTO_ISO: + case BTPROTO_L2CAP: + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; + info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION; + break; + case BTPROTO_SCO: + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE; + if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION; + break; + } + + hci_dev_put(hdev); + return 0; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 5eb0600bbd03..3b49828160b7 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2898,12 +2898,13 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) break; case HCI_ACLDATA_PKT: /* Detect if ISO packet has been sent as ACL */ - if (hci_conn_num(hdev, ISO_LINK)) { + if (hci_conn_num(hdev, CIS_LINK) || + hci_conn_num(hdev, BIS_LINK)) { __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); __u8 type; type = hci_conn_lookup_type(hdev, hci_handle(handle)); - if (type == ISO_LINK) + if (type == CIS_LINK || type == BIS_LINK) hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; } break; @@ -2911,6 +2912,8 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) break; case HCI_ISODATA_PKT: break; + case HCI_DRV_PKT: + break; default: kfree_skb(skb); return -EINVAL; @@ -3019,6 +3022,15 @@ static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return -EINVAL; } + if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) { + /* Intercept HCI Drv packet here and don't go with hdev->send + * callback. + */ + err = hci_drv_process_cmd(hdev, skb); + kfree_skb(skb); + return err; + } + err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); @@ -3345,7 +3357,8 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) case LE_LINK: cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: cnt = hdev->iso_mtu ? hdev->iso_cnt : hdev->le_mtu ? 
hdev->le_cnt : hdev->acl_cnt; break; @@ -3359,7 +3372,7 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) } static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, - int *quote) + __u8 type2, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; @@ -3371,7 +3384,8 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != type || skb_queue_empty(&c->data_q)) + if ((c->type != type && c->type != type2) || + skb_queue_empty(&c->data_q)) continue; if (c->state != BT_CONNECTED && c->state != BT_CONFIG) @@ -3579,7 +3593,7 @@ static void hci_sched_sco(struct hci_dev *hdev, __u8 type) else cnt = &hdev->sco_cnt; - while (*cnt && (conn = hci_low_sent(hdev, type, "e))) { + while (*cnt && (conn = hci_low_sent(hdev, type, type, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_conn_frame(hdev, conn, skb); @@ -3707,12 +3721,14 @@ static void hci_sched_iso(struct hci_dev *hdev) BT_DBG("%s", hdev->name); - if (!hci_conn_num(hdev, ISO_LINK)) + if (!hci_conn_num(hdev, CIS_LINK) && + !hci_conn_num(hdev, BIS_LINK)) return; cnt = hdev->iso_pkts ? &hdev->iso_cnt : hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt; - while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { + while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK, + "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_conn_frame(hdev, conn, skb); @@ -4057,10 +4073,13 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) return; } - err = hci_send_frame(hdev, skb); - if (err < 0) { - hci_cmd_sync_cancel_sync(hdev, -err); - return; + if (hci_skb_opcode(skb) != HCI_OP_NOP) { + err = hci_send_frame(hdev, skb); + if (err < 0) { + hci_cmd_sync_cancel_sync(hdev, -err); + return; + } + atomic_dec(&hdev->cmd_cnt); } if (hdev->req_status == HCI_REQ_PEND && @@ -4068,8 +4087,6 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) kfree_skb(hdev->req_skb); hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); } - - atomic_dec(&hdev->cmd_cnt); } static void hci_cmd_work(struct work_struct *work) diff --git a/net/bluetooth/hci_drv.c b/net/bluetooth/hci_drv.c new file mode 100644 index 000000000000..3dd2d8a006b9 --- /dev/null +++ b/net/bluetooth/hci_drv.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025 Google Corporation + */ + +#include <linux/skbuff.h> +#include <linux/types.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_drv.h> + +int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status) +{ + struct hci_drv_ev_hdr *hdr; + struct hci_drv_ev_cmd_status *ev; + struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_STATUS); + hdr->len = __cpu_to_le16(sizeof(*ev)); + + ev = skb_put(skb, sizeof(*ev)); + ev->opcode = __cpu_to_le16(cmd); + ev->status = status; + + hci_skb_pkt_type(skb) = HCI_DRV_PKT; + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_drv_cmd_status); + +int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp, + size_t rp_len) +{ + struct hci_drv_ev_hdr *hdr; + struct hci_drv_ev_cmd_complete *ev; + 
struct sk_buff *skb; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_COMPLETE); + hdr->len = __cpu_to_le16(sizeof(*ev) + rp_len); + + ev = skb_put(skb, sizeof(*ev)); + ev->opcode = __cpu_to_le16(cmd); + ev->status = status; + + skb_put_data(skb, rp, rp_len); + + hci_skb_pkt_type(skb) = HCI_DRV_PKT; + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_drv_cmd_complete); + +int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_drv_cmd_hdr *hdr; + const struct hci_drv_handler *handler = NULL; + u16 opcode, len, ogf, ocf; + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) + return -EILSEQ; + + opcode = __le16_to_cpu(hdr->opcode); + len = __le16_to_cpu(hdr->len); + if (len != skb->len) + return -EILSEQ; + + ogf = hci_opcode_ogf(opcode); + ocf = hci_opcode_ocf(opcode); + + if (!hdev->hci_drv) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_UNKNOWN_COMMAND); + + if (ogf != HCI_DRV_OGF_DRIVER_SPECIFIC) { + if (opcode < hdev->hci_drv->common_handler_count) + handler = &hdev->hci_drv->common_handlers[opcode]; + } else { + if (ocf < hdev->hci_drv->specific_handler_count) + handler = &hdev->hci_drv->specific_handlers[ocf]; + } + + if (!handler || !handler->func) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_UNKNOWN_COMMAND); + + if (len != handler->data_len) + return hci_drv_cmd_status(hdev, opcode, + HCI_DRV_STATUS_INVALID_PARAMETERS); + + return handler->func(hdev, skb->data, len); +} +EXPORT_SYMBOL(hci_drv_process_cmd); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index c38ada69c3d7..66052d6aaa1d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3804,7 +3804,7 @@ static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) lockdep_assert_held(&hdev->lock); list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { - if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) || + if (conn->type != CIS_LINK || conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) continue; @@ -4467,7 +4467,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, break; - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: if (hdev->iso_pkts) { hdev->iso_cnt += count; if (hdev->iso_cnt > hdev->iso_pkts) @@ -6351,6 +6352,17 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, info->secondary_phy &= 0x1f; } + /* Check if PA Sync is pending and if the hci_conn SID has not + * been set update it. 
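hci_drv_process_cmd() above pulls the header, validates the length, splits the opcode into OGF and OCF, and looks the command up in either the common or the driver-specific handler table, bounds-checked, before checking the expected payload size. The userspace sketch below mirrors that dispatch; the OGF/OCF split (upper 6 bits, lower 10 bits) matches the usual HCI opcode encoding, while the OGF_DRIVER_SPECIFIC value and the handler are made up for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OGF(op)                 ((op) >> 10)    /* upper 6 bits */
#define OCF(op)                 ((op) & 0x03ff) /* lower 10 bits */
#define OGF_DRIVER_SPECIFIC     0x3f            /* assumed value for the sketch */

struct handler {
        size_t data_len;
        int (*func)(const uint8_t *data, size_t len);
};

static int hello_handler(const uint8_t *data, size_t len)
{
        (void)data;
        printf("driver-specific command, %zu byte(s) of payload\n", len);
        return 0;
}

static const struct handler specific_handlers[] = {
        [0] = { .data_len = 2, .func = hello_handler },
};

static int process_cmd(uint16_t opcode, const uint8_t *data, size_t len)
{
        const struct handler *h = NULL;

        if (OGF(opcode) == OGF_DRIVER_SPECIFIC &&
            (size_t)OCF(opcode) < sizeof(specific_handlers) / sizeof(specific_handlers[0]))
                h = &specific_handlers[OCF(opcode)];

        if (!h || !h->func)
                return -1;                      /* unknown command */
        if (len != h->data_len)
                return -2;                      /* invalid parameters */
        return h->func(data, len);
}

int main(void)
{
        const uint8_t payload[2] = { 0x01, 0x02 };

        return process_cmd((OGF_DRIVER_SPECIFIC << 10) | 0x0000, payload, sizeof(payload));
}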
+ */ + if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (conn && conn->sid == HCI_SID_INVALID) + conn->sid = info->sid; + } + if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &info->bdaddr, info->bdaddr_type, NULL, 0, @@ -6402,7 +6414,8 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, conn->sync_handle = le16_to_cpu(ev->handle); conn->sid = HCI_SID_INVALID; - mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, BIS_LINK, + &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_pa_term_sync(hdev, ev->handle); goto unlock; @@ -6412,7 +6425,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, goto unlock; /* Add connection to indicate PA sync event */ - pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, + pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY, HCI_ROLE_SLAVE); if (IS_ERR(pa_sync)) @@ -6443,7 +6456,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); - mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) goto unlock; @@ -6727,7 +6740,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, goto unlock; } - if (conn->type != ISO_LINK) { + if (conn->type != CIS_LINK) { bt_dev_err(hdev, "Invalid connection link type handle 0x%4.4x", handle); @@ -6845,7 +6858,7 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, if (!acl) goto unlock; - mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); + mask = hci_proto_connect_ind(hdev, &acl->dst, CIS_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; @@ -6853,8 +6866,8 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, cis = hci_conn_hash_lookup_handle(hdev, cis_handle); if (!cis) { - cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE, - cis_handle); + cis = hci_conn_add(hdev, CIS_LINK, &acl->dst, + HCI_ROLE_SLAVE, cis_handle); if (IS_ERR(cis)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; @@ -6969,7 +6982,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "ignore too large handle %u", handle); continue; } - bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, + bis = hci_conn_add(hdev, BIS_LINK, BDADDR_ANY, HCI_ROLE_SLAVE, handle); if (IS_ERR(bis)) continue; @@ -7025,7 +7038,7 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, hci_dev_lock(hdev); - mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); + mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) goto unlock; @@ -7155,7 +7168,8 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data, /* Only match event if command OGF is for LE */ if (hdev->req_skb && - hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 && + (hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 || + hci_skb_opcode(hdev->req_skb) == HCI_OP_NOP) && hci_skb_event(hdev->req_skb) == ev->subevent) { *opcode = hci_skb_opcode(hdev->req_skb); hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, @@ -7511,8 +7525,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) goto done; } + hci_dev_lock(hdev); kfree_skb(hdev->recv_event); hdev->recv_event = 
skb_clone(skb, GFP_KERNEL); + hci_dev_unlock(hdev); event = hdr->evt; if (!event) { diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 022b86797acd..428ee5c7de7e 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -234,7 +234,8 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && - hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT && + hci_skb_pkt_type(skb) != HCI_DRV_PKT) continue; } else { /* Don't send frame to other channel types */ @@ -391,6 +392,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) else opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT); break; + case HCI_DRV_PKT: + if (bt_cb(skb)->incoming) + opcode = cpu_to_le16(HCI_MON_DRV_RX_PKT); + else + opcode = cpu_to_le16(HCI_MON_DRV_TX_PKT); + break; case HCI_DIAG_PKT: opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); break; @@ -1860,7 +1867,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && - hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT && + hci_skb_pkt_type(skb) != HCI_DRV_PKT) { err = -EINVAL; goto drop; } diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index e56b1cbedab9..62d1ff951ebe 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -2860,7 +2860,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, if (sent) { struct hci_conn *conn; - conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK, + conn = hci_conn_hash_lookup_ba(hdev, BIS_LINK, &sent->bdaddr); if (conn) { struct bt_iso_qos *qos = &conn->iso_qos; @@ -5477,7 +5477,7 @@ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, if (conn->type == LE_LINK) return hci_le_connect_cancel_sync(hdev, conn, reason); - if (conn->type == ISO_LINK) { + if (conn->type == CIS_LINK) { /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 1857: * @@ -5490,9 +5490,10 @@ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, return hci_disconnect_sync(hdev, conn, reason); /* CIS with no Create CIS sent have nothing to cancel */ - if (bacmp(&conn->dst, BDADDR_ANY)) - return HCI_ERROR_LOCAL_HOST_TERM; + return HCI_ERROR_LOCAL_HOST_TERM; + } + if (conn->type == BIS_LINK) { /* There is no way to cancel a BIS without terminating the BIG * which is done later on connection cleanup. 
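A large share of the Bluetooth changes above are mechanical fallout from splitting ISO_LINK into CIS_LINK and BIS_LINK: call sites either accept both (scheduling, buffer accounting) or pick one, as hci_connect_cancel_sync() now does for CIS versus BIS. Read as a predicate, the repeated test looks like the sketch below; the enum values are invented for the example and the helper itself is not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Invented values for the sketch; the real constants live in hci.h. */
enum { ACL_LINK = 1, SCO_LINK, LE_LINK, CIS_LINK, BIS_LINK };

struct conn { int type; };

/* One predicate instead of repeating "type == CIS_LINK || type == BIS_LINK". */
static bool conn_is_iso(const struct conn *c)
{
        return c->type == CIS_LINK || c->type == BIS_LINK;
}

int main(void)
{
        struct conn cis = { .type = CIS_LINK };
        struct conn acl = { .type = ACL_LINK };

        printf("cis iso? %d, acl iso? %d\n", conn_is_iso(&cis), conn_is_iso(&acl));
        return 0;
}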
*/ @@ -5554,9 +5555,12 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, { struct hci_cp_reject_conn_req cp; - if (conn->type == ISO_LINK) + if (conn->type == CIS_LINK) return hci_le_reject_cis_sync(hdev, conn, reason); + if (conn->type == BIS_LINK) + return -EINVAL; + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) return hci_reject_sco_sync(hdev, conn, reason); @@ -6898,20 +6902,37 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, static void create_pa_complete(struct hci_dev *hdev, void *data, int err) { + struct hci_conn *conn = data; + struct hci_conn *pa_sync; + bt_dev_dbg(hdev, "err %d", err); - if (!err) + if (err == -ECANCELED) return; + hci_dev_lock(hdev); + hci_dev_clear_flag(hdev, HCI_PA_SYNC); - if (err == -ECANCELED) - return; + if (!hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); - hci_dev_lock(hdev); + if (!err) + goto unlock; - hci_update_passive_scan_sync(hdev); + /* Add connection to indicate PA sync error */ + pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY, + HCI_ROLE_SLAVE); + + if (IS_ERR(pa_sync)) + goto unlock; + + set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags); + /* Notify iso layer */ + hci_connect_cfm(pa_sync, bt_status(err)); + +unlock: hci_dev_unlock(hdev); } @@ -6925,9 +6946,23 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) if (!hci_conn_valid(hdev, conn)) return -ECANCELED; + if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID) + return -EINVAL; + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) return -EBUSY; + /* Stop scanning if SID has not been set and active scanning is enabled + * so we use passive scanning which will be scanning using the allow + * list programmed to contain only the connection address. + */ + if (conn->sid == HCI_SID_INVALID && + hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + hci_scan_disable_sync(hdev); + hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + } + /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can * program the address in the allow list so PA advertisements can be * received. @@ -6936,6 +6971,14 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) hci_update_passive_scan_sync(hdev); + /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update + * it. 
+ */ + if (conn->sid == HCI_SID_INVALID) + __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL, + HCI_EV_LE_EXT_ADV_REPORT, + conn->conn_timeout, NULL); + memset(&cp, 0, sizeof(cp)); cp.options = qos->bcast.options; cp.sid = conn->sid; diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 2819cda616bc..6e2c752aaa8f 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -941,7 +941,7 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; - if (sa->iso_bc->bc_sid > 0x0f) + if (sa->iso_bc->bc_sid > 0x0f && sa->iso_bc->bc_sid != HCI_SID_INVALID) return -EINVAL; iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; @@ -1330,6 +1330,7 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; + int len = sizeof(struct sockaddr_iso); BT_DBG("sock %p, sk %p", sock, sk); @@ -1338,12 +1339,20 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, if (peer) { bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); sa->iso_bdaddr_type = iso_pi(sk)->dst_type; + + if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { + sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid; + sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis; + memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis, + ISO_MAX_NUM_BIS); + len += sizeof(struct sockaddr_iso_bc); + } } else { bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); sa->iso_bdaddr_type = iso_pi(sk)->src_type; } - return sizeof(struct sockaddr_iso); + return len; } static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, @@ -1988,11 +1997,13 @@ static void iso_conn_ready(struct iso_conn *conn) hcon->dst_type = iso_pi(parent)->dst_type; } - if (ev3) { + if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { iso_pi(sk)->qos = iso_pi(parent)->qos; hcon->iso_qos = iso_pi(sk)->qos; + iso_pi(sk)->bc_sid = iso_pi(parent)->bc_sid; iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis; - memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS); + memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, + ISO_MAX_NUM_BIS); set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } @@ -2029,6 +2040,9 @@ static bool iso_match_sid(struct sock *sk, void *data) { struct hci_ev_le_pa_sync_established *ev = data; + if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) + return true; + return ev->sid == iso_pi(sk)->bc_sid; } @@ -2075,8 +2089,10 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) if (ev1) { sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN, iso_match_sid, ev1); - if (sk && !ev1->status) + if (sk && !ev1->status) { iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); + iso_pi(sk)->bc_sid = ev1->sid; + } goto done; } @@ -2203,7 +2219,7 @@ done: static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) { - if (hcon->type != ISO_LINK) { + if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) { if (hcon->type != LE_LINK) return; @@ -2244,7 +2260,7 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) { - if (hcon->type != ISO_LINK) + if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 73472756618a..042d3ac3b4a3 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } -static bool 
l2cap_check_enc_key_size(struct hci_conn *hcon) +static bool l2cap_check_enc_key_size(struct hci_conn *hcon, + struct l2cap_chan *chan) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The @@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ - if (hcon->sec_level == BT_SECURITY_FIPS) + if (chan->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || @@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) !__l2cap_no_conn_pending(chan)) return; - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); @@ -3992,7 +3993,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && (!hci_conn_check_link_mode(conn->hcon) || - !l2cap_check_enc_key_size(conn->hcon))) { + !l2cap_check_enc_key_size(conn->hcon, pchan))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -7352,7 +7353,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status && l2cap_check_enc_key_size(hcon)) + if (!status && l2cap_check_enc_key_size(hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -7362,7 +7363,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status && l2cap_check_enc_key_size(hcon)) { + if (!status && l2cap_check_enc_key_size(hcon, chan)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 46b22708dfbd..261926dccc7e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3221,7 +3221,8 @@ failed: static u8 link_to_bdaddr(u8 link_type, u8 addr_type) { switch (link_type) { - case ISO_LINK: + case CIS_LINK: + case BIS_LINK: case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index e5ff65e424b5..3713ff490c65 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -304,7 +304,7 @@ void mgmt_mesh_foreach(struct hci_dev *hdev, { struct mgmt_mesh_tx *mesh_tx, *tmp; - list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) { + list_for_each_entry_safe(mesh_tx, tmp, &hdev->mesh_pending, list) { if (!sk || mesh_tx->sk == sk) cb(mesh_tx, data); } diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 98aea5485aae..a8c67035e23c 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
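The l2cap_check_enc_key_size() change above passes the channel along with the link so the FIPS floor of 16 bytes is applied based on the security level of the channel being established rather than the link's current level. A standalone sketch of the check's shape follows, with invented struct names and an assumed numeric value for the FIPS level.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEC_FIPS 4                      /* assumed numeric security level */

struct link_info { bool encrypted; uint8_t enc_key_size; uint8_t min_enc_key_size; };
struct chan_info { uint8_t sec_level; };

/* Same shape as the check above: the 16-byte FIPS floor comes from the
 * channel being established, not from the link's own level. */
static bool enc_key_size_ok(const struct link_info *hcon,
                            const struct chan_info *chan)
{
        int min_key_size = hcon->min_enc_key_size;

        if (chan->sec_level == SEC_FIPS)
                min_key_size = 16;

        return !hcon->encrypted || hcon->enc_key_size >= min_key_size;
}

int main(void)
{
        struct link_info hcon = { .encrypted = true, .enc_key_size = 7,
                                  .min_enc_key_size = 7 };
        struct chan_info fips_chan = { .sec_level = SEC_FIPS };

        printf("7-byte key acceptable for a FIPS channel? %s\n",
               enc_key_size_ok(&hcon, &fips_chan) ? "yes" : "no");
        return 0;
}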
*/ -static const u32 br_dst_default_metrics[RTAX_MAX] = { - [RTAX_MTU - 1] = 1500, -}; - void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; rcuref_init(&rt->dst.__rcuref, 1); rt->dst.dev = br->dev; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + dst_init_metrics(&rt->dst, br->metrics, false); + dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index db1bddb330ff..b159aae594c0 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -507,6 +507,7 @@ struct net_bridge { struct rtable fake_rtable; struct rt6_info fake_rt6_info; }; + u32 metrics[RTAX_MAX]; #endif u16 group_fwd_mask; u16 group_fwd_mask_required; diff --git a/net/can/bcm.c b/net/can/bcm.c index 0bca1b9b3f70..6bc1cc4c94c5 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -58,6 +58,7 @@ #include <linux/can/skb.h> #include <linux/can/bcm.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <net/sock.h> #include <net/net_namespace.h> @@ -122,6 +123,7 @@ struct bcm_op { struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; + spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */ }; struct bcm_sock { @@ -217,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); - list_for_each_entry(op, &bo->rx_ops, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(op, &bo->rx_ops, list) { unsigned long reduction; @@ -273,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); + + rcu_read_unlock(); + return 0; } #endif /* CONFIG_PROC_FS */ @@ -285,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; - struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + struct canfd_frame *cf; int err; /* no target device? => exit */ if (!op->ifindex) return; + /* read currframe under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + cf = op->frames + op->cfsiz * op->currframe; + spin_unlock_bh(&op->bcm_tx_lock); + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ @@ -312,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op) skb->dev = dev; can_skb_set_owner(skb, op->sk); err = can_send(skb, 1); + + /* update currframe and count under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + if (!err) op->frames_abs++; @@ -320,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op) /* reached last frame? 
*/ if (op->currframe >= op->nframes) op->currframe = 0; + + if (op->count > 0) + op->count--; + + spin_unlock_bh(&op->bcm_tx_lock); out: dev_put(dev); } @@ -430,7 +451,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) struct bcm_msg_head msg_head; if (op->kt_ival1 && (op->count > 0)) { - op->count--; + bcm_can_tx(op); if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ @@ -445,7 +466,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) bcm_send_to_user(op, &msg_head, NULL, 0); } - bcm_can_tx(op); } else if (op->kt_ival2) { bcm_can_tx(op); @@ -843,7 +863,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, REGMASK(op->can_id), bcm_rx_handler, op); - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -863,7 +883,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -956,6 +976,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } op->flags = msg_head->flags; + /* only lock for unlikely count/nframes/currframe changes */ + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX || + op->flags & SETTIMER) { + + spin_lock_bh(&op->bcm_tx_lock); + + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX) { + /* potentially update changed nframes */ + op->nframes = msg_head->nframes; + /* restart multiple frame transmission */ + op->currframe = 0; + } + + if (op->flags & SETTIMER) + op->count = msg_head->count; + + spin_unlock_bh(&op->bcm_tx_lock); + } + } else { /* insert new BCM operation for the given can_id */ @@ -963,9 +1004,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (!op) return -ENOMEM; + spin_lock_init(&op->bcm_tx_lock); op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; + op->nframes = msg_head->nframes; + + if (op->flags & SETTIMER) + op->count = msg_head->count; /* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { @@ -1023,22 +1069,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ - if (op->nframes != msg_head->nframes) { - op->nframes = msg_head->nframes; - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - - /* check flags */ - - if (op->flags & TX_RESET_MULTI_IDX) { - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - if (op->flags & SETTIMER) { /* set timer values */ - op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); @@ -1055,11 +1087,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->flags |= TX_ANNOUNCE; } - if (op->flags & TX_ANNOUNCE) { + if (op->flags & TX_ANNOUNCE) bcm_can_tx(op); - if (op->count) - op->count--; - } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); @@ -1272,7 +1301,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ - list_del(&op->list); + 
list_del_rcu(&op->list); bcm_remove_op(op); return err; } diff --git a/net/core/datagram.c b/net/core/datagram.c index 9ef5442536f5..94cc4705e91d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -52,6 +52,7 @@ #include <linux/pagemap.h> #include <linux/iov_iter.h> #include <linux/indirect_call_wrapper.h> +#include <linux/crc32.h> #include <net/protocol.h> #include <linux/skbuff.h> @@ -61,7 +62,6 @@ #include <net/tcp_states.h> #include <trace/events/skb.h> #include <net/busy_poll.h> -#include <crypto/hash.h> #include "devmem.h" @@ -483,41 +483,37 @@ short_copy: return 0; } -static size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i) +#ifdef CONFIG_NET_CRC32C +static size_t crc32c_and_copy_to_iter(const void *addr, size_t bytes, + void *_crcp, struct iov_iter *i) { -#ifdef CONFIG_CRYPTO_HASH - struct ahash_request *hash = hashp; - struct scatterlist sg; + u32 *crcp = _crcp; size_t copied; copied = copy_to_iter(addr, bytes, i); - sg_init_one(&sg, addr, copied); - ahash_request_set_crypt(hash, &sg, NULL, copied); - crypto_ahash_update(hash); + *crcp = crc32c(*crcp, addr, copied); return copied; -#else - return 0; -#endif } /** - * skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator - * and update a hash. + * skb_copy_and_crc32c_datagram_iter - Copy datagram to an iovec iterator + * and update a CRC32C value. * @skb: buffer to copy * @offset: offset in the buffer to start copying from * @to: iovec iterator to copy to * @len: amount of data to copy from buffer to iovec - * @hash: hash request to update + * @crcp: pointer to CRC32C value to update + * + * Return: 0 on success, -EFAULT if there was a fault during copy. */ -int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, - struct iov_iter *to, int len, - struct ahash_request *hash) +int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len, u32 *crcp) { return __skb_datagram_iter(skb, offset, to, len, true, - hash_and_copy_to_iter, hash); + crc32c_and_copy_to_iter, crcp); } -EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter); +EXPORT_SYMBOL(skb_copy_and_crc32c_datagram_iter); +#endif /* CONFIG_NET_CRC32C */ static size_t simple_copy_to_iter(const void *addr, size_t bytes, void *data __always_unused, struct iov_iter *i) @@ -706,7 +702,8 @@ zerocopy_fill_skb_from_devmem(struct sk_buff *skb, struct iov_iter *from, * iov_addrs are interpreted as an offset in bytes into the dma-buf to * send from. We do not support other iter types. 
*/ - if (iov_iter_type(from) != ITER_IOVEC) + if (iov_iter_type(from) != ITER_IOVEC && + iov_iter_type(from) != ITER_UBUF) return -EFAULT; while (length && iov_iter_count(from)) { diff --git a/net/core/dev.c b/net/core/dev.c index fccf2167b235..3eb4e945f312 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3596,9 +3596,10 @@ out: } EXPORT_SYMBOL(skb_checksum_help); +#ifdef CONFIG_NET_CRC32C int skb_crc32c_csum_help(struct sk_buff *skb) { - __le32 crc32c_csum; + u32 crc; int ret = 0, offset, start; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3626,15 +3627,14 @@ int skb_crc32c_csum_help(struct sk_buff *skb) if (ret) goto out; - crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, - skb->len - start, ~(__u32)0, - crc32c_csum_stub)); - *(__le32 *)(skb->data + offset) = crc32c_csum; + crc = ~skb_crc32c(skb, start, skb->len - start, ~0); + *(__le32 *)(skb->data + offset) = cpu_to_le32(crc); skb_reset_csum_not_inet(skb); out: return ret; } EXPORT_SYMBOL(skb_crc32c_csum_help); +#endif /* CONFIG_NET_CRC32C */ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { @@ -4815,6 +4815,7 @@ static inline void ____napi_schedule(struct softnet_data *sd, } use_local_napi: + DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list)); list_add_tail(&napi->poll_list, &sd->poll_list); WRITE_ONCE(napi->list_owner, smp_processor_id()); /* If not called from net_rx_action() @@ -7476,9 +7477,14 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) work = __napi_poll(n, &do_repoll); - if (do_repoll) + if (do_repoll) { +#if defined(CONFIG_DEBUG_NET) + if (unlikely(!napi_is_scheduled(n))) + pr_crit("repoll requested for device %s %ps but napi is not scheduled.\n", + n->dev->name, n->poll); +#endif list_add_tail(&n->poll_list, repoll); - + } netpoll_poll_unlock(have); return work; @@ -9278,8 +9284,16 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) dev_change_rx_flags(dev, IFF_PROMISC); } - if (notify) + if (notify) { + /* The ops lock is only required to ensure consistent locking + * for `NETDEV_CHANGE` notifiers. This function is sometimes + * called without the lock, even for devices that are ops + * locked, such as in `dev_uc_sync_multiple` when using + * bonding or teaming. 
+ */ + netdev_ops_assert_locked(dev); __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL); + } return 0; } diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 60f27cb4e54f..f9d76d85d04f 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -149,8 +149,7 @@ int lwtunnel_build_state(struct net *net, u16 encap_type, } EXPORT_SYMBOL_GPL(lwtunnel_build_state); -int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack, - bool rtnl_is_held) +int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) { const struct lwtunnel_encap_ops *ops; int ret = -EINVAL; @@ -167,12 +166,7 @@ int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack, const char *encap_type_str = lwtunnel_encap_str(encap_type); if (encap_type_str) { - if (rtnl_is_held) - __rtnl_unlock(); request_module("rtnl-lwt-%s", encap_type_str); - if (rtnl_is_held) - rtnl_lock(); - ops = rcu_access_pointer(lwtun_encaps[encap_type]); } } @@ -186,8 +180,7 @@ int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack, EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, - struct netlink_ext_ack *extack, - bool rtnl_is_held) + struct netlink_ext_ack *extack) { struct rtnexthop *rtnh = (struct rtnexthop *)attr; struct nlattr *nla_entype; @@ -208,9 +201,7 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, } encap_type = nla_get_u16(nla_entype); - if (lwtunnel_valid_encap_type(encap_type, - extack, - rtnl_is_held) != 0) + if (lwtunnel_valid_encap_type(encap_type, extack)) return -EOPNOTSUPP; } } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 254067b719da..a6e2c91ec3e7 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1517,7 +1517,7 @@ out: return rc; out_kfree_skb: rc = -EINVAL; - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_HH_FILLFAIL); goto out; } EXPORT_SYMBOL(neigh_resolve_output); @@ -1541,7 +1541,7 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb) err = dev_queue_xmit(skb); else { err = -EINVAL; - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_HH_FILLFAIL); } return err; } diff --git a/net/core/scm.c b/net/core/scm.c index 733c0cbd393d..0225bd94170f 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -404,3 +404,125 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) return new_fpl; } EXPORT_SYMBOL(scm_fp_dup); + +#ifdef CONFIG_SECURITY_NETWORK +static void scm_passec(struct sock *sk, struct msghdr *msg, struct scm_cookie *scm) +{ + struct lsm_context ctx; + int err; + + if (sk->sk_scm_security) { + err = security_secid_to_secctx(scm->secid, &ctx); + + if (err >= 0) { + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, ctx.len, + ctx.context); + + security_release_secctx(&ctx); + } + } +} + +static bool scm_has_secdata(struct sock *sk) +{ + return sk->sk_scm_security; +} +#else +static void scm_passec(struct sock *sk, struct msghdr *msg, struct scm_cookie *scm) +{ +} + +static bool scm_has_secdata(struct sock *sk) +{ + return false; +} +#endif + +static void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm) +{ + struct file *pidfd_file = NULL; + int len, pidfd; + + /* put_cmsg() doesn't return an error if CMSG is truncated, + * that's why we need to opencode these checks here. 
+ */ + if (msg->msg_flags & MSG_CMSG_COMPAT) + len = sizeof(struct compat_cmsghdr) + sizeof(int); + else + len = sizeof(struct cmsghdr) + sizeof(int); + + if (msg->msg_controllen < len) { + msg->msg_flags |= MSG_CTRUNC; + return; + } + + if (!scm->pid) + return; + + pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file); + + if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) { + if (pidfd_file) { + put_unused_fd(pidfd); + fput(pidfd_file); + } + + return; + } + + if (pidfd_file) + fd_install(pidfd, pidfd_file); +} + +static bool __scm_recv_common(struct sock *sk, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!msg->msg_control) { + if (sk->sk_scm_credentials || sk->sk_scm_pidfd || + scm->fp || scm_has_secdata(sk)) + msg->msg_flags |= MSG_CTRUNC; + + scm_destroy(scm); + return false; + } + + if (sk->sk_scm_credentials) { + struct user_namespace *current_ns = current_user_ns(); + struct ucred ucreds = { + .pid = scm->creds.pid, + .uid = from_kuid_munged(current_ns, scm->creds.uid), + .gid = from_kgid_munged(current_ns, scm->creds.gid), + }; + + put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); + } + + scm_passec(sk, msg, scm); + + if (scm->fp) + scm_detach_fds(msg, scm); + + return true; +} + +void scm_recv(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!__scm_recv_common(sock->sk, msg, scm, flags)) + return; + + scm_destroy_cred(scm); +} +EXPORT_SYMBOL(scm_recv); + +void scm_recv_unix(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!__scm_recv_common(sock->sk, msg, scm, flags)) + return; + + if (sock->sk->sk_scm_pidfd) + scm_pidfd_recv(msg, scm); + + scm_destroy_cred(scm); +} diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4159107f1666..85fc82f72d26 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -64,6 +64,7 @@ #include <linux/mpls.h> #include <linux/kcov.h> #include <linux/iov_iter.h> +#include <linux/crc32.h> #include <net/protocol.h> #include <net/dst.h> @@ -3444,8 +3445,7 @@ fault: EXPORT_SYMBOL(skb_store_bits); /* Checksum skb data. 
*/ -__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, - __wsum csum, const struct skb_checksum_ops *ops) +__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) { int start = skb_headlen(skb); int i, copy = start - offset; @@ -3456,8 +3456,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, if (copy > 0) { if (copy > len) copy = len; - csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, - skb->data + offset, copy, csum); + csum = csum_partial(skb->data + offset, copy, csum); if ((len -= copy) == 0) return csum; offset += copy; @@ -3487,13 +3486,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_atomic(p); - csum2 = INDIRECT_CALL_1(ops->update, - csum_partial_ext, - vaddr + p_off, p_len, 0); + csum2 = csum_partial(vaddr + p_off, p_len, 0); kunmap_atomic(vaddr); - csum = INDIRECT_CALL_1(ops->combine, - csum_block_add_ext, csum, - csum2, pos, p_len); + csum = csum_block_add(csum, csum2, pos); pos += p_len; } @@ -3514,10 +3509,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum2; if (copy > len) copy = len; - csum2 = __skb_checksum(frag_iter, offset - start, - copy, 0, ops); - csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, - csum, csum2, pos, copy); + csum2 = skb_checksum(frag_iter, offset - start, copy, + 0); + csum = csum_block_add(csum, csum2, pos); if ((len -= copy) == 0) return csum; offset += copy; @@ -3529,18 +3523,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, return csum; } -EXPORT_SYMBOL(__skb_checksum); - -__wsum skb_checksum(const struct sk_buff *skb, int offset, - int len, __wsum csum) -{ - const struct skb_checksum_ops ops = { - .update = csum_partial_ext, - .combine = csum_block_add_ext, - }; - - return __skb_checksum(skb, offset, len, csum, &ops); -} EXPORT_SYMBOL(skb_checksum); /* Both of above in one bottle. 
*/ @@ -3633,6 +3615,78 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, } EXPORT_SYMBOL(skb_copy_and_csum_bits); +#ifdef CONFIG_NET_CRC32C +u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) +{ + int start = skb_headlen(skb); + int i, copy = start - offset; + struct sk_buff *frag_iter; + + if (copy > 0) { + copy = min(copy, len); + crc = crc32c(crc, skb->data + offset, copy); + len -= copy; + if (len == 0) + return crc; + offset += copy; + } + + if (WARN_ON_ONCE(!skb_frags_readable(skb))) + return 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + int end; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + WARN_ON(start > offset + len); + + end = start + skb_frag_size(frag); + copy = end - offset; + if (copy > 0) { + u32 p_off, p_len, copied; + struct page *p; + u8 *vaddr; + + copy = min(copy, len); + skb_frag_foreach_page(frag, + skb_frag_off(frag) + offset - start, + copy, p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + crc = crc32c(crc, vaddr + p_off, p_len); + kunmap_atomic(vaddr); + } + len -= copy; + if (len == 0) + return crc; + offset += copy; + } + start = end; + } + + skb_walk_frags(skb, frag_iter) { + int end; + + WARN_ON(start > offset + len); + + end = start + frag_iter->len; + copy = end - offset; + if (copy > 0) { + copy = min(copy, len); + crc = skb_crc32c(frag_iter, offset - start, copy, crc); + len -= copy; + if (len == 0) + return crc; + offset += copy; + } + start = end; + } + BUG_ON(len); + + return crc; +} +EXPORT_SYMBOL(skb_crc32c); +#endif /* CONFIG_NET_CRC32C */ + __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) { __sum16 sum; @@ -3692,32 +3746,6 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) } EXPORT_SYMBOL(__skb_checksum_complete); -static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) -{ - net_warn_ratelimited( - "%s: attempt to compute crc32c without libcrc32c.ko\n", - __func__); - return 0; -} - -static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, - int offset, int len) -{ - net_warn_ratelimited( - "%s: attempt to compute crc32c without libcrc32c.ko\n", - __func__); - return 0; -} - -static const struct skb_checksum_ops default_crc32c_ops = { - .update = warn_crc32c_csum_update, - .combine = warn_crc32c_csum_combine, -}; - -const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = - &default_crc32c_ops; -EXPORT_SYMBOL(crc32c_csum_stub); - /** * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() * @from: source buffer diff --git a/net/core/sock.c b/net/core/sock.c index 347ce75482f5..0cb52e590094 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1220,15 +1220,6 @@ int sk_setsockopt(struct sock *sk, int level, int optname, return 0; } return -EPERM; - case SO_PASSSEC: - assign_bit(SOCK_PASSSEC, &sock->flags, valbool); - return 0; - case SO_PASSCRED: - assign_bit(SOCK_PASSCRED, &sock->flags, valbool); - return 0; - case SO_PASSPIDFD: - assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool); - return 0; case SO_TYPE: case SO_PROTOCOL: case SO_DOMAIN: @@ -1276,6 +1267,8 @@ int sk_setsockopt(struct sock *sk, int level, int optname, return 0; } case SO_TXREHASH: + if (!sk_is_tcp(sk)) + return -EOPNOTSUPP; if (val < -1 || val > 1) return -EINVAL; if ((u8)val == SOCK_TXREHASH_DEFAULT) @@ -1557,6 +1550,33 @@ set_sndbuf: sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); break; + case SO_PASSCRED: + if (sk_may_scm_recv(sk)) + sk->sk_scm_credentials = valbool; + else + ret = -EOPNOTSUPP; + break; + + case SO_PASSSEC: + if 
(IS_ENABLED(CONFIG_SECURITY_NETWORK) && sk_may_scm_recv(sk)) + sk->sk_scm_security = valbool; + else + ret = -EOPNOTSUPP; + break; + + case SO_PASSPIDFD: + if (sk_is_unix(sk)) + sk->sk_scm_pidfd = valbool; + else + ret = -EOPNOTSUPP; + break; + + case SO_PASSRIGHTS: + if (sk_is_unix(sk)) + sk->sk_scm_rights = valbool; + else + ret = -EOPNOTSUPP; + break; case SO_INCOMING_CPU: reuseport_update_incoming_cpu(sk, val); @@ -1853,11 +1873,24 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_PASSCRED: - v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); + if (!sk_may_scm_recv(sk)) + return -EOPNOTSUPP; + + v.val = sk->sk_scm_credentials; break; case SO_PASSPIDFD: - v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags); + if (!sk_is_unix(sk)) + return -EOPNOTSUPP; + + v.val = sk->sk_scm_pidfd; + break; + + case SO_PASSRIGHTS: + if (!sk_is_unix(sk)) + return -EOPNOTSUPP; + + v.val = sk->sk_scm_rights; break; case SO_PEERCRED: @@ -1954,7 +1987,10 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_PASSSEC: - v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); + if (!IS_ENABLED(CONFIG_SECURITY_NETWORK) || !sk_may_scm_recv(sk)) + return -EOPNOTSUPP; + + v.val = sk->sk_scm_security; break; case SO_PEERSEC: @@ -2102,6 +2138,9 @@ int sk_getsockopt(struct sock *sk, int level, int optname, break; case SO_TXREHASH: + if (!sk_is_tcp(sk)) + return -EOPNOTSUPP; + /* Paired with WRITE_ONCE() in sk_setsockopt() */ v.val = READ_ONCE(sk->sk_txrehash); break; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index c33d4bf17929..0b7564b53790 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -140,7 +140,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev) { - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M, KSZ_EGRESS_TAG_LEN); @@ -311,10 +316,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev) { - /* Tag decoding */ - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; - unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; unsigned int len = KSZ_EGRESS_TAG_LEN; + unsigned int port; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + /* Tag decoding */ + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; /* Extra 4-bytes PTP timestamp */ if (tag[0] & KSZ9477_PTP_TAG_INDICATION) { diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 49bea6b45bd5..eb253e0fd61b 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -921,9 +921,18 @@ int ethtool_get_ts_info_by_phc(struct net_device *dev, phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc); if (IS_ERR(phy)) - err = PTR_ERR(phy); - else - err = 0; + return PTR_ERR(phy); + + /* Report the phc source only if we have a real + * phc source with an index. 
+ */ + if (info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_PHYLIB; + info->phc_phyindex = phy->phyindex; + } + err = 0; + } else if (!err && info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_NETDEV; } info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE | @@ -947,10 +956,20 @@ int __ethtool_get_ts_info(struct net_device *dev, ethtool_init_tsinfo(info); if (phy_is_default_hwtstamp(phydev) && - phy_has_tsinfo(phydev)) + phy_has_tsinfo(phydev)) { err = phy_ts_info(phydev, info); - else if (ops->get_ts_info) + /* Report the phc source only if we have a real + * phc source with an index. + */ + if (!err && info->phc_index >= 0) { + info->phc_source = HWTSTAMP_SOURCE_PHYLIB; + info->phc_phyindex = phydev->phyindex; + } + } else if (ops->get_ts_info) { err = ops->get_ts_info(dev, info); + if (!err && info->phc_index >= 0) + info->phc_source = HWTSTAMP_SOURCE_NETDEV; + } info->so_timestamping |= SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c index 8130b406ef10..8c654caa6805 100644 --- a/net/ethtool/tsinfo.c +++ b/net/ethtool/tsinfo.c @@ -160,6 +160,12 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base, /* _TSINFO_HWTSTAMP_PROVIDER */ len += nla_total_size(0) + 2 * nla_total_size(sizeof(u32)); } + if (ts_info->phc_source) { + len += nla_total_size(sizeof(u32)); /* _TSINFO_HWTSTAMP_SOURCE */ + if (ts_info->phc_phyindex) + /* _TSINFO_HWTSTAMP_PHYINDEX */ + len += nla_total_size(sizeof(u32)); + } if (req_base->flags & ETHTOOL_FLAG_STATS) len += nla_total_size(0) + /* _TSINFO_STATS */ nla_total_size_64bit(sizeof(u64)) * ETHTOOL_TS_STAT_CNT; @@ -259,6 +265,16 @@ static int tsinfo_fill_reply(struct sk_buff *skb, nla_nest_end(skb, nest); } + if (ts_info->phc_source) { + if (nla_put_u32(skb, ETHTOOL_A_TSINFO_HWTSTAMP_SOURCE, + ts_info->phc_source)) + return -EMSGSIZE; + + if (ts_info->phc_phyindex && + nla_put_u32(skb, ETHTOOL_A_TSINFO_HWTSTAMP_PHYINDEX, + ts_info->phc_phyindex)) + return -EMSGSIZE; + } if (req_base->flags & ETHTOOL_FLAG_STATS && tsinfo_put_stats(skb, &data->stats)) return -EMSGSIZE; @@ -346,6 +362,11 @@ static int ethnl_tsinfo_dump_one_phydev(struct sk_buff *skb, if (ret < 0) goto err; + if (reply_data->ts_info.phc_index >= 0) { + reply_data->ts_info.phc_source = HWTSTAMP_SOURCE_PHYLIB; + reply_data->ts_info.phc_phyindex = phydev->phyindex; + } + ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data, ehdr); if (ret < 0) goto err; @@ -389,6 +410,8 @@ static int ethnl_tsinfo_dump_one_netdev(struct sk_buff *skb, if (ret < 0) goto err; + if (reply_data->ts_info.phc_index >= 0) + reply_data->ts_info.phc_source = HWTSTAMP_SOURCE_NETDEV; ret = ethnl_tsinfo_end_dump(skb, dev, req_info, reply_data, ehdr); if (ret < 0) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 0e4076866c0a..f14a41ee4aa1 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) } #ifdef CONFIG_INET_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == 
TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, @@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) sk = esp_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + } bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 57f088e5540e..fd1e1507a224 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -807,7 +807,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, case RTA_MULTIPATH: err = lwtunnel_valid_encap_type_attr(nla_data(attr), nla_len(attr), - extack, false); + extack); if (err < 0) goto errout; cfg->fc_mp = nla_data(attr); @@ -825,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, case RTA_ENCAP_TYPE: cfg->fc_encap_type = nla_get_u16(attr); err = lwtunnel_valid_encap_type(cfg->fc_encap_type, - extack, false); + extack); if (err < 0) goto errout; break; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index dabe2b7044ab..d643bd1a0d9d 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -617,12 +617,10 @@ int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc, { int err; - if (!nhc->nhc_pcpu_rth_output) { - nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *, - gfp_flags); - if (!nhc->nhc_pcpu_rth_output) - return -ENOMEM; - } + nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *, + gfp_flags); + if (!nhc->nhc_pcpu_rth_output) + return -ENOMEM; if (encap) { struct lwtunnel_state *lwtstate; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 3b5677d8467e..2ff2f79c7351 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -120,11 +120,6 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv4.mr_tables)) -static bool ipmr_can_free_table(struct net *net) -{ - return !check_net(net) || !net_initialized(net); -} - static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -317,11 +312,6 @@ EXPORT_SYMBOL(ipmr_rule_default); #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; 
mrt; mrt = NULL) -static bool ipmr_can_free_table(struct net *net) -{ - return !check_net(net); -} - static struct mr_table *ipmr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -437,7 +427,7 @@ static void ipmr_free_table(struct mr_table *mrt) { struct net *net = read_pnet(&mrt->net); - WARN_ON_ONCE(!ipmr_can_free_table(net)); + WARN_ON_ONCE(!mr_can_free_table(net)); timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC | diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 823e4a783d2b..4397e89d3123 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3180,8 +3180,7 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, } cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]); - err = lwtunnel_valid_encap_type(cfg->nh_encap_type, - extack, false); + err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack); if (err < 0) goto out; diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index b5b06323cfd9..0d31a8c108d4 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - int ret; - - offset = offset - sizeof(struct udphdr); + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32; - if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL; rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out; - ret = __xfrm4_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; - skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 9e73944e3b53..72adfc107b55 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) } #ifdef CONFIG_INET6_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = 
__inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, @@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) sk = esp6_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + } bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1f860340690c..7094d7708686 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -281,22 +281,20 @@ EXPORT_SYMBOL_GPL(fib6_new_table); struct fib6_table *fib6_get_table(struct net *net, u32 id) { - struct fib6_table *tb; struct hlist_head *head; - unsigned int h; + struct fib6_table *tb; - if (id == 0) + if (!id) id = RT6_TABLE_MAIN; - h = id & (FIB6_TABLE_HASHSZ - 1); - rcu_read_lock(); - head = &net->ipv6.fib_table_hash[h]; - hlist_for_each_entry_rcu(tb, head, tb6_hlist) { - if (tb->tb6_id == id) { - rcu_read_unlock(); + + head = &net->ipv6.fib_table_hash[id & (FIB6_TABLE_HASHSZ - 1)]; + + /* See comment in fib6_link_table(). RCU is not required, + * but rcu_dereference_raw() is used to avoid data-race. 
+ */ + hlist_for_each_entry_rcu(tb, head, tb6_hlist, true) + if (tb->tb6_id == id) return tb; - } - } - rcu_read_unlock(); return NULL; } @@ -1029,8 +1027,9 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i, .table = table }; - nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_drop_pcpu_from, - &arg); + rcu_read_lock(); + nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_drop_pcpu_from, &arg); + rcu_read_unlock(); } else { struct fib6_nh *fib6_nh; @@ -1223,7 +1222,9 @@ next_iter: fib6_nsiblings++; } BUG_ON(fib6_nsiblings != rt->fib6_nsiblings); + rcu_read_lock(); rt6_multipath_rebalance(temp_sibling); + rcu_read_unlock(); } /* @@ -1266,7 +1267,9 @@ add: sibling->fib6_nsiblings--; rt->fib6_nsiblings = 0; list_del_rcu(&rt->fib6_siblings); + rcu_read_lock(); rt6_multipath_rebalance(next_sibling); + rcu_read_unlock(); return err; } } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index b413c9c8a21c..3276cde5ebd7 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -108,11 +108,6 @@ static void ipmr_expire_process(struct timer_list *t); lockdep_rtnl_is_held() || \ list_empty(&net->ipv6.mr6_tables)) -static bool ip6mr_can_free_table(struct net *net) -{ - return !check_net(net) || !net_initialized(net); -} - static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -306,11 +301,6 @@ EXPORT_SYMBOL(ip6mr_rule_default); #define ip6mr_for_each_table(mrt, net) \ for (mrt = net->ipv6.mrt6; mrt; mrt = NULL) -static bool ip6mr_can_free_table(struct net *net) -{ - return !check_net(net); -} - static struct mr_table *ip6mr_mr_table_iter(struct net *net, struct mr_table *mrt) { @@ -416,7 +406,7 @@ static void ip6mr_free_table(struct mr_table *mrt) { struct net *net = read_pnet(&mrt->net); - WARN_ON_ONCE(!ip6mr_can_free_table(net)); + WARN_ON_ONCE(!mr_can_free_table(net)); timer_shutdown_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC | diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 44300962230b..0143262094b0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1820,11 +1820,13 @@ static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg) void rt6_flush_exceptions(struct fib6_info *f6i) { - if (f6i->nh) - nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, - f6i); - else + if (f6i->nh) { + rcu_read_lock(); + nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, f6i); + rcu_read_unlock(); + } else { fib6_nh_flush_exceptions(f6i->fib6_nh, f6i); + } } /* Find cached rt in the hash table inside passed in rt @@ -3672,12 +3674,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, goto out; pcpu_alloc: + fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags); if (!fib6_nh->rt6i_pcpu) { - fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags); - if (!fib6_nh->rt6i_pcpu) { - err = -ENOMEM; - goto out; - } + err = -ENOMEM; + goto out; } fib6_nh->fib_nh_dev = dev; @@ -3737,24 +3737,6 @@ void fib6_nh_release_dsts(struct fib6_nh *fib6_nh) } } -static int fib6_nh_prealloc_percpu(struct fib6_nh *fib6_nh, gfp_t gfp_flags) -{ - struct fib_nh_common *nhc = &fib6_nh->nh_common; - - fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags); - if (!fib6_nh->rt6i_pcpu) - return -ENOMEM; - - nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *, - gfp_flags); - if (!nhc->nhc_pcpu_rth_output) { - free_percpu(fib6_nh->rt6i_pcpu); - return -ENOMEM; - } - - return 0; -} - static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, gfp_t gfp_flags, struct 
netlink_ext_ack *extack) @@ -3792,12 +3774,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, goto free; } - if (!cfg->fc_nh_id) { - err = fib6_nh_prealloc_percpu(&rt->fib6_nh[0], gfp_flags); - if (err) - goto free_metrics; - } - if (cfg->fc_flags & RTF_ADDRCONF) rt->dst_nocount = true; @@ -3822,8 +3798,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif return rt; -free_metrics: - ip_fib_metrics_put(rt->fib6_metrics); free: kfree(rt); err: @@ -3832,6 +3806,7 @@ err: static int ip6_route_info_create_nh(struct fib6_info *rt, struct fib6_config *cfg, + gfp_t gfp_flags, struct netlink_ext_ack *extack) { struct net *net = cfg->fc_nlinfo.nl_net; @@ -3841,6 +3816,8 @@ static int ip6_route_info_create_nh(struct fib6_info *rt, if (cfg->fc_nh_id) { struct nexthop *nh; + rcu_read_lock(); + nh = nexthop_find_by_id(net, cfg->fc_nh_id); if (!nh) { err = -EINVAL; @@ -3860,10 +3837,12 @@ static int ip6_route_info_create_nh(struct fib6_info *rt, rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); + + rcu_read_unlock(); } else { int addr_type; - err = fib6_nh_init(net, rt->fib6_nh, cfg, GFP_ATOMIC, extack); + err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack); if (err) goto out_release; @@ -3895,6 +3874,7 @@ out_release: fib6_info_release(rt); return err; out_free: + rcu_read_unlock(); ip_fib_metrics_put(rt->fib6_metrics); kfree(rt); return err; @@ -3910,16 +3890,12 @@ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, if (IS_ERR(rt)) return PTR_ERR(rt); - rcu_read_lock(); - - err = ip6_route_info_create_nh(rt, cfg, extack); + err = ip6_route_info_create_nh(rt, cfg, gfp_flags, extack); if (err) - goto unlock; + return err; err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack); fib6_info_release(rt); -unlock: - rcu_read_unlock(); return err; } @@ -4704,7 +4680,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net, if (IS_ERR(f6i)) return f6i; - err = ip6_route_info_create_nh(f6i, &cfg, extack); + err = ip6_route_info_create_nh(f6i, &cfg, gfp_flags, extack); if (err) return ERR_PTR(err); @@ -5172,8 +5148,7 @@ static int rtm_to_fib6_multipath_config(struct fib6_config *cfg, rtnh = rtnh_next(rtnh, &remaining); } while (rtnh_ok(rtnh, remaining)); - return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, - extack, false); + return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack); } static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -5310,8 +5285,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[RTA_ENCAP_TYPE]) { cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); - err = lwtunnel_valid_encap_type(cfg->fc_encap_type, - extack, false); + err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); if (err < 0) goto errout; } @@ -5334,131 +5308,29 @@ struct rt6_nh { struct fib6_info *fib6_info; struct fib6_config r_cfg; struct list_head list; - int weight; }; -static void ip6_route_mpath_info_cleanup(struct list_head *rt6_nh_list) +static int ip6_route_info_append(struct list_head *rt6_nh_list, + struct fib6_info *rt, + struct fib6_config *r_cfg) { - struct rt6_nh *nh, *nh_next; - - list_for_each_entry_safe(nh, nh_next, rt6_nh_list, list) { - struct fib6_info *rt = nh->fib6_info; - - if (rt) { - free_percpu(rt->fib6_nh->nh_common.nhc_pcpu_rth_output); - free_percpu(rt->fib6_nh->rt6i_pcpu); - ip_fib_metrics_put(rt->fib6_metrics); - kfree(rt); - } + struct rt6_nh *nh; - list_del(&nh->list); - kfree(nh); + 
list_for_each_entry(nh, rt6_nh_list, list) { + /* check if fib6_info already exists */ + if (rt6_duplicate_nexthop(nh->fib6_info, rt)) + return -EEXIST; } -} - -static int ip6_route_mpath_info_create(struct list_head *rt6_nh_list, - struct fib6_config *cfg, - struct netlink_ext_ack *extack) -{ - struct rtnexthop *rtnh; - int remaining; - int err; - - remaining = cfg->fc_mp_len; - rtnh = (struct rtnexthop *)cfg->fc_mp; - - /* Parse a Multipath Entry and build a list (rt6_nh_list) of - * fib6_info structs per nexthop - */ - while (rtnh_ok(rtnh, remaining)) { - struct fib6_config r_cfg; - struct fib6_info *rt; - struct rt6_nh *nh; - int attrlen; - - nh = kzalloc(sizeof(*nh), GFP_KERNEL); - if (!nh) { - err = -ENOMEM; - goto err; - } - - list_add_tail(&nh->list, rt6_nh_list); - - memcpy(&r_cfg, cfg, sizeof(*cfg)); - if (rtnh->rtnh_ifindex) - r_cfg.fc_ifindex = rtnh->rtnh_ifindex; - - attrlen = rtnh_attrlen(rtnh); - if (attrlen > 0) { - struct nlattr *nla, *attrs = rtnh_attrs(rtnh); - - nla = nla_find(attrs, attrlen, RTA_GATEWAY); - if (nla) { - r_cfg.fc_gateway = nla_get_in6_addr(nla); - r_cfg.fc_flags |= RTF_GATEWAY; - } - - r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); - nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); - if (nla) - r_cfg.fc_encap_type = nla_get_u16(nla); - } - - r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); - rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack); - if (IS_ERR(rt)) { - err = PTR_ERR(rt); - goto err; - } + nh = kzalloc(sizeof(*nh), GFP_KERNEL); + if (!nh) + return -ENOMEM; - nh->fib6_info = rt; - nh->weight = rtnh->rtnh_hops + 1; - memcpy(&nh->r_cfg, &r_cfg, sizeof(r_cfg)); - - rtnh = rtnh_next(rtnh, &remaining); - } + nh->fib6_info = rt; + memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); + list_add_tail(&nh->list, rt6_nh_list); return 0; -err: - ip6_route_mpath_info_cleanup(rt6_nh_list); - return err; -} - -static int ip6_route_mpath_info_create_nh(struct list_head *rt6_nh_list, - struct netlink_ext_ack *extack) -{ - struct rt6_nh *nh, *nh_next, *nh_tmp; - LIST_HEAD(tmp); - int err; - - list_for_each_entry_safe(nh, nh_next, rt6_nh_list, list) { - struct fib6_info *rt = nh->fib6_info; - - err = ip6_route_info_create_nh(rt, &nh->r_cfg, extack); - if (err) { - nh->fib6_info = NULL; - goto err; - } - - rt->fib6_nh->fib_nh_weight = nh->weight; - - list_move_tail(&nh->list, &tmp); - - list_for_each_entry(nh_tmp, rt6_nh_list, list) { - /* check if fib6_info already exists */ - if (rt6_duplicate_nexthop(nh_tmp->fib6_info, rt)) { - err = -EEXIST; - goto err; - } - } - } -out: - list_splice(&tmp, rt6_nh_list); - return err; -err: - ip6_route_mpath_info_cleanup(rt6_nh_list); - goto out; } static void ip6_route_mpath_notify(struct fib6_info *rt, @@ -5518,11 +5390,16 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, struct fib6_info *rt_notif = NULL, *rt_last = NULL; struct nl_info *info = &cfg->fc_nlinfo; struct rt6_nh *nh, *nh_safe; + struct fib6_config r_cfg; + struct rtnexthop *rtnh; LIST_HEAD(rt6_nh_list); struct rt6_nh *err_nh; + struct fib6_info *rt; __u16 nlflags; - int nhn = 0; + int remaining; + int attrlen; int replace; + int nhn = 0; int err; replace = (cfg->fc_nlinfo.nlh && @@ -5532,15 +5409,57 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) nlflags |= NLM_F_APPEND; - err = ip6_route_mpath_info_create(&rt6_nh_list, cfg, extack); - if (err) - return err; + remaining = cfg->fc_mp_len; + rtnh = (struct rtnexthop *)cfg->fc_mp; - rcu_read_lock(); + /* Parse a Multipath 
Entry and build a list (rt6_nh_list) of + * fib6_info structs per nexthop + */ + while (rtnh_ok(rtnh, remaining)) { + memcpy(&r_cfg, cfg, sizeof(*cfg)); + if (rtnh->rtnh_ifindex) + r_cfg.fc_ifindex = rtnh->rtnh_ifindex; - err = ip6_route_mpath_info_create_nh(&rt6_nh_list, extack); - if (err) - goto cleanup; + attrlen = rtnh_attrlen(rtnh); + if (attrlen > 0) { + struct nlattr *nla, *attrs = rtnh_attrs(rtnh); + + nla = nla_find(attrs, attrlen, RTA_GATEWAY); + if (nla) { + r_cfg.fc_gateway = nla_get_in6_addr(nla); + r_cfg.fc_flags |= RTF_GATEWAY; + } + + r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); + nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); + if (nla) + r_cfg.fc_encap_type = nla_get_u16(nla); + } + + r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK); + rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + rt = NULL; + goto cleanup; + } + + err = ip6_route_info_create_nh(rt, &r_cfg, GFP_KERNEL, extack); + if (err) { + rt = NULL; + goto cleanup; + } + + rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1; + + err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); + if (err) { + fib6_info_release(rt); + goto cleanup; + } + + rtnh = rtnh_next(rtnh, &remaining); + } /* for add and replace send one notification with all nexthops. * Skip the notification in fib6_add_rt2node and send one with @@ -5629,8 +5548,6 @@ add_errout: } cleanup: - rcu_read_unlock(); - list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, list) { fib6_info_release(nh->fib6_info); list_del(&nh->list); @@ -6412,6 +6329,8 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, err = -ENOBUFS; seq = info->nlh ? info->nlh->nlmsg_seq : 0; + rcu_read_lock(); + skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC); if (!skb) goto errout; @@ -6424,10 +6343,14 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, kfree_skb(skb); goto errout; } + + rcu_read_unlock(); + rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, GFP_ATOMIC); return; errout: + rcu_read_unlock(); rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); } diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index ee5e448cc7a8..ac1dbd492c22 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -1671,7 +1671,7 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt, if (!seg6_validate_srh(srh, len, false)) return -EINVAL; - slwt->srh = kmemdup(srh, len, GFP_ATOMIC); + slwt->srh = kmemdup(srh, len, GFP_KERNEL); if (!slwt->srh) return -ENOMEM; @@ -1911,7 +1911,7 @@ static int parse_nla_bpf(struct nlattr **attrs, struct seg6_local_lwt *slwt, if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME]) return -EINVAL; - slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_ATOMIC); + slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL); if (!slwt->bpf.name) return -ENOMEM; @@ -1994,7 +1994,7 @@ static int parse_nla_counters(struct nlattr **attrs, return -EINVAL; /* counters are always zero initialized */ - pcounters = seg6_local_alloc_pcpu_counters(GFP_ATOMIC); + pcounters = seg6_local_alloc_pcpu_counters(GFP_KERNEL); if (!pcounters) return -ENOMEM; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 4abc5e9d6322..841c81abaaf4 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - 
int ret; + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32; if (skb->protocol == htons(ETH_P_IP)) return xfrm4_gro_udp_encap_rcv(sk, head, skb); - offset = offset - sizeof(struct udphdr); - - if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL; rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out; - ret = __xfrm6_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out; - skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1; diff --git a/net/key/af_key.c b/net/key/af_key.c index c56bb4f451e6..efc2a91f4c48 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2630,7 +2630,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, } return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, - kma ? &k : NULL, net, NULL, 0, NULL); + kma ? &k : NULL, net, NULL, 0, NULL, NULL); out: return err; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 0259cde394ba..cc77ec5769d8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -887,15 +887,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; + /* Partial read */ + if (used + offset < skb_len) + continue; + if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } - - /* Partial read */ - if (used + offset < skb_len) - continue; } while (len > 0); out: diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9017b98fea04..d9d88f2f2831 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1290,9 +1290,9 @@ static u8 ieee80211_num_beaconing_links(struct ieee80211_sub_if_data *sdata) sdata->vif.type != NL80211_IFTYPE_P2P_GO) return num; - if (!sdata->vif.valid_links) - return num; - + /* non-MLO mode of operation also uses link_id 0 in sdata so it is + * safe to directly proceed with the below loop + */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { link = sdata_dereference(sdata->link[link_id], sdata); if (!link) @@ -1421,6 +1421,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, (IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); + link_conf->eht_disable_mcs15 = + u8_get_bits(params->eht_oper->params, + IEEE80211_EHT_OPER_MCS15_DISABLE); } else { link_conf->eht_su_beamformer = false; link_conf->eht_su_beamformee = false; @@ -2921,7 +2924,7 @@ static int ieee80211_scan(struct wiphy *wiphy, * the frames sent while scanning on other channel will be * lost) */ - if (sdata->deflink.u.ap.beacon && + if (ieee80211_num_beaconing_links(sdata) && (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || !(req->flags & NL80211_SCAN_FLAG_AP))) return -EOPNOTSUPP; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index a381b4b756ea..5cc56d578048 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -777,7 +777,7 
@@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata, if (ethertype < ETH_P_802_3_MIN) return false; - if (skb->sk && sock_flag(skb->sk, SOCK_WIFI_STATUS)) + if (sk_requests_wifi_status(skb->sk)) return false; if (skb->ip_summed == CHECKSUM_PARTIAL) { diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index c94a9c7ca960..91444301a84a 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -636,7 +636,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, mesh_path_add_gate(mpath); } rcu_read_unlock(); - } else { + } else if (ifmsh->mshcfg.dot11MeshForwarding) { rcu_read_lock(); mpath = mesh_path_lookup(sdata, target_addr); if (mpath) { @@ -654,6 +654,8 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, } } rcu_read_unlock(); + } else { + forward = false; } if (reply) { @@ -671,7 +673,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, } } - if (forward && ifmsh->mshcfg.dot11MeshForwarding) { + if (forward) { u32 preq_id; u8 hopcount; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index cb7079071885..7b8da40a912d 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -240,6 +240,9 @@ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, u32 scan_flags, const u8 *da) { + struct ieee80211_link_data *link_sdata; + u8 link_id; + if (!sdata) return false; @@ -251,7 +254,20 @@ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR) return true; - return ether_addr_equal(da, sdata->vif.addr); + + if (ether_addr_equal(da, sdata->vif.addr)) + return true; + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + link_sdata = rcu_dereference(sdata->link[link_id]); + if (!link_sdata) + continue; + + if (ether_addr_equal(da, link_sdata->conf->addr)) + return true; + } + + return false; } void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3b9392a6ddb2..d8d4f3d7d7f2 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2859,7 +2859,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, } if (unlikely(!multicast && - ((skb->sk && sock_flag(skb->sk, SOCK_WIFI_STATUS)) || + (sk_requests_wifi_status(skb->sk) || ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS))) info_id = ieee80211_store_ack_skb(local, skb, &info_flags, cookie); @@ -3756,7 +3756,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, return false; /* don't handle TX status request here either */ - if (skb->sk && sock_flag(skb->sk, SOCK_WIFI_STATUS)) + if (sk_requests_wifi_status(skb->sk)) return false; if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { @@ -4648,7 +4648,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info)); } - if (unlikely(skb->sk && sock_flag(skb->sk, SOCK_WIFI_STATUS))) { + if (unlikely(sk_requests_wifi_status(skb->sk))) { info->status_data = ieee80211_store_ack_skb(local, skb, &info->flags, NULL); if (info->status_data) diff --git a/net/mctp/device.c b/net/mctp/device.c index 7c0dcf3df319..4d404edd7446 100644 --- a/net/mctp/device.c +++ b/net/mctp/device.c @@ -120,8 +120,8 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb) int ifindex = 0, rc; /* Filter by ifindex if a header is provided */ - if 
(cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) { - hdr = nlmsg_data(cb->nlh); + hdr = nlmsg_payload(cb->nlh, sizeof(*hdr)); + if (hdr) { ifindex = hdr->ifa_index; } else { if (cb->strict_check) { diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c index 590f642413e4..05b899f22d90 100644 --- a/net/mctp/neigh.c +++ b/net/mctp/neigh.c @@ -250,7 +250,10 @@ static int mctp_rtm_getneigh(struct sk_buff *skb, struct netlink_callback *cb) int idx; } *cbctx = (void *)cb->ctx; - ndmsg = nlmsg_data(cb->nlh); + ndmsg = nlmsg_payload(cb->nlh, sizeof(*ndmsg)); + if (!ndmsg) + return -EINVAL; + req_ifindex = ndmsg->ndm_ifindex; idx = 0; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 3b2183fc7e56..2560416218d0 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -212,7 +212,7 @@ config NF_CT_PROTO_SCTP bool 'SCTP protocol connection tracking support' depends on NETFILTER_ADVANCED default y - select CRC32 + select NET_CRC32C help With this option enabled, the layer 3 independent connection tracking code will be able to do state tracking on SCTP connections. @@ -475,7 +475,7 @@ endif # NF_CONNTRACK config NF_TABLES select NETFILTER_NETLINK - select CRC32 + select NET_CRC32C tristate "Netfilter nf_tables support" help nftables is the new packet classification framework that intends to diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index 8c5b1fe12d07..c203252e856d 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -105,7 +105,7 @@ config IP_VS_PROTO_AH config IP_VS_PROTO_SCTP bool "SCTP load balancing support" - select CRC32 + select NET_CRC32C help This option enables support for load balancing SCTP transport protocol. Say Y if unsure. diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 5481bd561eb4..e6aaee92dba4 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -11,8 +11,8 @@ config OPENVSWITCH (!NF_NAT || NF_NAT) && \ (!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT))) depends on PSAMPLE || !PSAMPLE - select CRC32 select MPLS + select NET_CRC32C select NET_MPLS_GSO select DST_CACHE select NET_NSH diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 9f0b3f943fca..ad914d2b2e22 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -796,7 +796,7 @@ config NET_ACT_SKBEDIT config NET_ACT_CSUM tristate "Checksum Updating" depends on NET_CLS_ACT && INET - select CRC32 + select NET_CRC32C help Say Y here to update some common checksum after some direct packet alterations. 
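The CRC32C conversion running through this series (skb_crc32c(), skb_copy_and_crc32c_datagram_iter(), skb_crc32c_csum_help() under CONFIG_NET_CRC32C, and the Kconfig switches from CRC32 to NET_CRC32C above) drops the old __skb_checksum()/crc32c_csum_stub indirection in favour of direct crc32c() calls. A minimal sketch of how a caller would use the new helper, assuming only what the hunks above introduce; example_crc32c_over_skb() is a hypothetical illustration, not part of any patch in this series:

        /* Hypothetical caller of the new skb_crc32c() helper -- sketch only.
         * It mirrors the seed/invert convention used by skb_crc32c_csum_help()
         * in the net/core/dev.c hunk above.
         */
        #include <linux/crc32.h>
        #include <linux/skbuff.h>

        static __le32 example_crc32c_over_skb(const struct sk_buff *skb, int offset)
        {
                u32 crc;

                /* Seed with ~0, let skb_crc32c() walk the linear data, page
                 * frags and frag_list, then invert the result before storing
                 * it little-endian, as the SCTP-style CRC32C expects.
                 */
                crc = ~skb_crc32c(skb, offset, skb->len - offset, ~0);

                return cpu_to_le32(crc);
        }

With NET_CRC32C selected, this is the whole computation; the ahash/scatterlist path and the crc32c_csum_stub fallback removed earlier in the series are no longer needed.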
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index cb8c525ea20e..7986145a527c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } + sch->qstats.backlog += len; + sch->q.qlen++; + if (first && !cl->cl_nactive) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); @@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } - sch->qstats.backlog += len; - sch->q.qlen++; - return NET_XMIT_SUCCESS; } diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index d18a72df3654..24d5a35ce894 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -7,10 +7,10 @@ menuconfig IP_SCTP tristate "The SCTP Protocol" depends on INET depends on IPV6 || IPV6=n - select CRC32 select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA1 + select NET_CRC32C select NET_UDP_TUNNEL help Stream Control Transmission Protocol diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 502095173d88..e6f863c031b4 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -111,7 +111,6 @@ int __init sctp_offload_init(void) if (ret) goto ipv4; - crc32c_csum_stub = &sctp_csum_ops; return ret; ipv4: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 85a9dfeff4d6..90b75d4ec329 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -9100,7 +9100,8 @@ static void __sctp_write_space(struct sctp_association *asoc) wq = rcu_dereference(sk->sk_wq); if (wq) { if (waitqueue_active(&wq->wait)) - wake_up_interruptible(&wq->wait); + wake_up_interruptible_poll(&wq->wait, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index c524421ec652..8584893b4785 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, goto exit; } + /* Get net to avoid freed tipc_crypto when delete namespace */ + get_net(aead->crypto->net); + /* Now, do encrypt */ rc = crypto_aead_encrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc; tipc_bearer_put(b); + put_net(aead->crypto->net); exit: kfree(ctx); @@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err) kfree(tx_ctx); tipc_bearer_put(b); tipc_aead_put(aead); + put_net(net); } /** diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2ab20821d6bb..bd507f74e35e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -765,6 +765,11 @@ static void copy_peercred(struct sock *sk, struct sock *peersk) spin_unlock(&sk->sk_peer_lock); } +static bool unix_may_passcred(const struct sock *sk) +{ + return sk->sk_scm_credentials || sk->sk_scm_pidfd; +} + static int unix_listen(struct socket *sock, int backlog) { int err; @@ -1010,6 +1015,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, sock_init_data(sock, sk); + sk->sk_scm_rights = 1; sk->sk_hash = unix_unbound_hash(sk); sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; @@ -1411,9 +1417,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, if (err) goto out; - if ((test_bit(SOCK_PASSCRED, &sock->flags) || - test_bit(SOCK_PASSPIDFD, &sock->flags)) && - !READ_ONCE(unix_sk(sk)->addr)) { + if (unix_may_passcred(sk) && !READ_ONCE(unix_sk(sk)->addr)) { err = unix_autobind(sk); if (err) goto out; @@ -1531,9 +1535,7 @@ static int 
unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, if (err) goto out; - if ((test_bit(SOCK_PASSCRED, &sock->flags) || - test_bit(SOCK_PASSPIDFD, &sock->flags)) && - !READ_ONCE(u->addr)) { + if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) { err = unix_autobind(sk); if (err) goto out; @@ -1625,10 +1627,12 @@ restart: /* The way is open! Fastly set all the necessary fields... */ sock_hold(sk); - unix_peer(newsk) = sk; - newsk->sk_state = TCP_ESTABLISHED; - newsk->sk_type = sk->sk_type; + unix_peer(newsk) = sk; + newsk->sk_state = TCP_ESTABLISHED; + newsk->sk_type = sk->sk_type; + newsk->sk_scm_recv_flags = other->sk_scm_recv_flags; init_peercred(newsk); + newu = unix_sk(newsk); newu->listener = other; RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); @@ -1709,17 +1713,6 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb) return 0; } -static void unix_sock_inherit_flags(const struct socket *old, - struct socket *new) -{ - if (test_bit(SOCK_PASSCRED, &old->flags)) - set_bit(SOCK_PASSCRED, &new->flags); - if (test_bit(SOCK_PASSPIDFD, &old->flags)) - set_bit(SOCK_PASSPIDFD, &new->flags); - if (test_bit(SOCK_PASSSEC, &old->flags)) - set_bit(SOCK_PASSSEC, &new->flags); -} - static int unix_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { @@ -1756,7 +1749,6 @@ static int unix_accept(struct socket *sock, struct socket *newsock, unix_state_lock(tsk); unix_update_edges(unix_sk(tsk)); newsock->state = SS_CONNECTED; - unix_sock_inherit_flags(sock, newsock); sock_graft(tsk, newsock); unix_state_unlock(tsk); return 0; @@ -1865,7 +1857,7 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen { int err = 0; - UNIXCB(skb).pid = get_pid(scm->pid); + UNIXCB(skb).pid = get_pid(scm->pid); UNIXCB(skb).uid = scm->creds.uid; UNIXCB(skb).gid = scm->creds.gid; UNIXCB(skb).fp = NULL; @@ -1877,28 +1869,19 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen return err; } -static bool unix_passcred_enabled(const struct socket *sock, - const struct sock *other) -{ - return test_bit(SOCK_PASSCRED, &sock->flags) || - test_bit(SOCK_PASSPIDFD, &sock->flags) || - !other->sk_socket || - test_bit(SOCK_PASSCRED, &other->sk_socket->flags) || - test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags); -} - /* * Some apps rely on write() giving SCM_CREDENTIALS * We include credentials if source or destination socket * asserted SOCK_PASSCRED. 
*/ -static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, - const struct sock *other) +static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk, + const struct sock *other) { if (UNIXCB(skb).pid) return; - if (unix_passcred_enabled(sock, other)) { - UNIXCB(skb).pid = get_pid(task_tgid(current)); + + if (unix_may_passcred(sk) || unix_may_passcred(other)) { + UNIXCB(skb).pid = get_pid(task_tgid(current)); current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); } } @@ -1974,9 +1957,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; } - if ((test_bit(SOCK_PASSCRED, &sock->flags) || - test_bit(SOCK_PASSPIDFD, &sock->flags)) && - !READ_ONCE(u->addr)) { + if (unix_may_passcred(sk) && !READ_ONCE(u->addr)) { err = unix_autobind(sk); if (err) goto out; @@ -2093,6 +2074,11 @@ restart_locked: goto out_unlock; } + if (UNIXCB(skb).fp && !other->sk_scm_rights) { + err = -EPERM; + goto out_unlock; + } + if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) @@ -2139,7 +2125,8 @@ restart_locked: if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); - maybe_add_creds(skb, sock, other); + + unix_maybe_add_creds(skb, sk, other); scm_stat_add(other, skb); skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); @@ -2167,14 +2154,14 @@ out: #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) #if IS_ENABLED(CONFIG_AF_UNIX_OOB) -static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other, +static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other, struct scm_cookie *scm, bool fds_sent) { struct unix_sock *ousk = unix_sk(other); struct sk_buff *skb; int err; - skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); + skb = sock_alloc_send_skb(sk, 1, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; @@ -2193,12 +2180,16 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) { - unix_state_unlock(other); err = -EPIPE; - goto out; + goto out_unlock; + } + + if (UNIXCB(skb).fp && !other->sk_scm_rights) { + err = -EPERM; + goto out_unlock; } - maybe_add_creds(skb, sock, other); + unix_maybe_add_creds(skb, sk, other); scm_stat_add(other, skb); spin_lock(&other->sk_receive_queue.lock); @@ -2211,6 +2202,8 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other other->sk_data_ready(other); return 0; +out_unlock: + unix_state_unlock(other); out: consume_skb(skb); return err; @@ -2314,7 +2307,13 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, (other->sk_shutdown & RCV_SHUTDOWN)) goto out_pipe_unlock; - maybe_add_creds(skb, sock, other); + if (UNIXCB(skb).fp && !other->sk_scm_rights) { + unix_state_unlock(other); + err = -EPERM; + goto out_free; + } + + unix_maybe_add_creds(skb, sk, other); scm_stat_add(other, skb); skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); @@ -2324,7 +2323,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (msg->msg_flags & MSG_OOB) { - err = queue_oob(sock, msg, other, &scm, fds_sent); + err = queue_oob(sk, msg, other, &scm, fds_sent); if (err) goto out_err; sent++; @@ -2846,8 +2845,7 @@ unlock: /* Never glue messages from different writers */ if (!unix_skb_scm_eq(skb, &scm)) break; - } else if (test_bit(SOCK_PASSCRED, &sock->flags) || - 
test_bit(SOCK_PASSPIDFD, &sock->flags)) { + } else if (unix_may_passcred(sk)) { /* Copy credentials */ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 4abc81f33d3e..72c000c0ae5f 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1304,7 +1304,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) xs->queue_id = qid; xp_add_xsk(xs->pool, xs); - if (xs->zc && qid < dev->real_num_rx_queues) { + if (qid < dev->real_num_rx_queues) { struct netdev_rx_queue *rxq; rxq = __netif_get_rx_queue(dev, qid); diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index fe82e2d07300..fc7a603b04f1 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) struct espintcp_ctx *ctx = espintcp_getctx(sk); if (skb_queue_len(&ctx->out_queue) >= - READ_ONCE(net_hotdata.max_backlog)) + READ_ONCE(net_hotdata.max_backlog)) { + kfree_skb(skb); return -ENOBUFS; + } __skb_queue_tail(&ctx->out_queue, skb); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index d62f76161d83..81fd486b5e56 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -145,10 +145,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return NULL; } - /* This skb was already validated on the upper/virtual dev */ - if ((x->xso.dev != dev) && (x->xso.real_dev == dev)) - return skb; - local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); err = !skb_queue_empty(&sd->xfrm_backlog); @@ -159,8 +155,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || - unlikely(xmit_xfrm_check_overflow(skb)))) { + if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it. */ @@ -256,6 +251,11 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, return -EINVAL; } + if (xuo->flags & XFRM_OFFLOAD_INBOUND && x->if_id) { + NL_SET_ERR_MSG(extack, "XFRM if_id is not supported in RX path"); + return -EINVAL; + } + is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET; /* We don't yet support TFC padding. 
*/ @@ -314,7 +314,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xso->dev = dev; netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); - xso->real_dev = dev; if (xuo->flags & XFRM_OFFLOAD_INBOUND) xso->dir = XFRM_DEV_OFFLOAD_IN; @@ -326,11 +325,10 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, else xso->type = XFRM_DEV_OFFLOAD_CRYPTO; - err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack); + err = dev->xfrmdev_ops->xdo_dev_state_add(dev, x, extack); if (err) { xso->dev = NULL; xso->dir = 0; - xso->real_dev = NULL; netdev_put(dev, &xso->dev_tracker); xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; @@ -378,7 +376,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, xdo->dev = dev; netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC); - xdo->real_dev = dev; xdo->type = XFRM_DEV_OFFLOAD_PACKET; switch (dir) { case XFRM_POLICY_IN: @@ -400,7 +397,6 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack); if (err) { xdo->dev = NULL; - xdo->real_dev = NULL; xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; xdo->dir = 0; netdev_put(dev, &xdo->dev_tracker); diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 0c1420534394..907c3ccb440d 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -48,7 +48,6 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) { struct acomp_req *req = ipcomp_cb(skb)->req; struct ipcomp_req_extra *extra; - const int plen = skb->data_len; struct scatterlist *dsg; int len, dlen; @@ -64,7 +63,7 @@ static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) /* Only update truesize on input. */ if (!hlen) - skb->truesize += dlen - plen; + skb->truesize += dlen; skb->data_len = dlen; skb->len += dlen; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 143ac3aa7537..d4134a18c658 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) struct xfrm_policy *delpol; struct hlist_head *chain; + /* Sanitize mark before store */ + policy->mark.v &= policy->mark.m; + spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); if (chain) @@ -4630,7 +4633,7 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_migrate, struct xfrm_kmaddress *k, struct net *net, struct xfrm_encap_tmpl *encap, u32 if_id, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, struct xfrm_user_offload *xuo) { int i, err, nx_cur = 0, nx_new = 0; struct xfrm_policy *pol = NULL; @@ -4663,7 +4666,7 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, if ((x = xfrm_migrate_state_find(mp, net, if_id))) { x_cur[nx_cur] = x; nx_cur++; - xc = xfrm_state_migrate(x, mp, encap); + xc = xfrm_state_migrate(x, mp, encap, net, xuo, extack); if (xc) { x_new[nx_new] = xc; nx_new++; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 341d79ecb5c2..203b585c2ae2 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -599,9 +599,9 @@ static void ___xfrm_state_destroy(struct xfrm_state *x) x->mode_cbs->destroy_state(x); hrtimer_cancel(&x->mtimer); timer_delete_sync(&x->rtimer); - kfree(x->aead); - kfree(x->aalg); - kfree(x->ealg); + kfree_sensitive(x->aead); + kfree_sensitive(x->aalg); + kfree_sensitive(x->ealg); kfree(x->calg); kfree(x->encap); kfree(x->coaddr); @@ -767,7 +767,7 @@ void 
xfrm_dev_state_delete(struct xfrm_state *x) struct net_device *dev = READ_ONCE(xso->dev); if (dev) { - dev->xfrmdev_ops->xdo_dev_state_delete(x); + dev->xfrmdev_ops->xdo_dev_state_delete(dev, x); spin_lock_bh(&xfrm_state_dev_gc_lock); hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list); spin_unlock_bh(&xfrm_state_dev_gc_lock); @@ -789,7 +789,7 @@ void xfrm_dev_state_free(struct xfrm_state *x) spin_unlock_bh(&xfrm_state_dev_gc_lock); if (dev->xfrmdev_ops->xdo_dev_state_free) - dev->xfrmdev_ops->xdo_dev_state_free(x); + dev->xfrmdev_ops->xdo_dev_state_free(dev, x); WRITE_ONCE(xso->dev, NULL); xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; netdev_put(dev, &xso->dev_tracker); @@ -838,9 +838,6 @@ int __xfrm_state_delete(struct xfrm_state *x) xfrm_nat_keepalive_state_updated(x); spin_unlock(&net->xfrm.xfrm_state_lock); - if (x->encap_sk) - sock_put(rcu_dereference_raw(x->encap_sk)); - xfrm_dev_state_delete(x); /* All xfrm_state objects are created by xfrm_state_alloc. @@ -1551,19 +1548,19 @@ found: if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) { struct xfrm_dev_offload *xdo = &pol->xdo; struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = xdo->dev; xso->type = XFRM_DEV_OFFLOAD_PACKET; xso->dir = xdo->dir; - xso->dev = xdo->dev; - xso->real_dev = xdo->real_dev; + xso->dev = dev; xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; - netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC); - error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); + netdev_hold(dev, &xso->dev_tracker, GFP_ATOMIC); + error = dev->xfrmdev_ops->xdo_dev_state_add(dev, x, + NULL); if (error) { xso->dir = 0; - netdev_put(xso->dev, &xso->dev_tracker); + netdev_put(dev, &xso->dev_tracker); xso->dev = NULL; - xso->real_dev = NULL; xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; x->km.state = XFRM_STATE_DEAD; to_put = x; @@ -1721,6 +1718,9 @@ static void __xfrm_state_insert(struct xfrm_state *x) list_add(&x->km.all, &net->xfrm.state_all); + /* Sanitize mark before store */ + x->mark.v &= x->mark.m; + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family); XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, @@ -1958,8 +1958,9 @@ static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *secu return 0; } -static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, - struct xfrm_encap_tmpl *encap) +static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig, + struct xfrm_encap_tmpl *encap, + struct xfrm_migrate *m) { struct net *net = xs_net(orig); struct xfrm_state *x = xfrm_state_alloc(net); @@ -2058,6 +2059,11 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, goto error; } + + x->props.family = m->new_family; + memcpy(&x->id.daddr, &m->new_daddr, sizeof(x->id.daddr)); + memcpy(&x->props.saddr, &m->new_saddr, sizeof(x->props.saddr)); + return x; error: @@ -2120,21 +2126,23 @@ EXPORT_SYMBOL(xfrm_migrate_state_find); struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, struct xfrm_migrate *m, - struct xfrm_encap_tmpl *encap) + struct xfrm_encap_tmpl *encap, + struct net *net, + struct xfrm_user_offload *xuo, + struct netlink_ext_ack *extack) { struct xfrm_state *xc; - xc = xfrm_state_clone(x, encap); + xc = xfrm_state_clone_and_setup(x, encap, m); if (!xc) return NULL; - xc->props.family = m->new_family; - if (xfrm_init_state(xc) < 0) goto error; - memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr)); - memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); + /* configure the hardware if offload is 
requested */ + if (xuo && xfrm_dev_state_add(net, xc, xuo, extack)) + goto error; /* add state */ if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) { diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 784a2d124749..59f258daf830 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -178,11 +178,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay seq and seq_hi should be 0 for output SA"); return -EINVAL; } - if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) { - NL_SET_ERR_MSG( - extack, - "Replay oseq_hi should be 0 in non-ESN mode for output SA"); - return -EINVAL; + + if (!(p->flags & XFRM_STATE_ESN)) { + if (rs->oseq_hi) { + NL_SET_ERR_MSG( + extack, + "Replay oseq_hi should be 0 in non-ESN mode for output SA"); + return -EINVAL; + } + if (rs->oseq == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA"); + return -EINVAL; + } + } else { + if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA"); + return -EINVAL; + } } if (rs->bmp_len) { NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); @@ -196,11 +212,27 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay oseq and oseq_hi should be 0 for input SA"); return -EINVAL; } - if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) { - NL_SET_ERR_MSG( - extack, - "Replay seq_hi should be 0 in non-ESN mode for input SA"); - return -EINVAL; + if (!(p->flags & XFRM_STATE_ESN)) { + if (rs->seq_hi) { + NL_SET_ERR_MSG( + extack, + "Replay seq_hi should be 0 in non-ESN mode for input SA"); + return -EINVAL; + } + + if (rs->seq == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA"); + return -EINVAL; + } + } else { + if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) { + NL_SET_ERR_MSG( + extack, + "Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA"); + return -EINVAL; + } } } @@ -1173,7 +1205,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) if (!nla) return -EMSGSIZE; algo = nla_data(nla); - strscpy_pad(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); + strscpy_pad(algo->alg_name, auth->alg_name); if (redact_secret && auth->alg_key_len) memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8); @@ -1186,7 +1218,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) if (!nla) return -EMSGSIZE; ap = nla_data(nla); - strscpy_pad(ap->alg_name, auth->alg_name, sizeof(ap->alg_name)); + strscpy_pad(ap->alg_name, auth->alg_name); ap->alg_key_len = auth->alg_key_len; ap->alg_trunc_len = auth->alg_trunc_len; if (redact_secret && auth->alg_key_len) @@ -1207,7 +1239,7 @@ static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb) return -EMSGSIZE; ap = nla_data(nla); - strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name)); + strscpy_pad(ap->alg_name, aead->alg_name); ap->alg_key_len = aead->alg_key_len; ap->alg_icv_len = aead->alg_icv_len; @@ -1229,7 +1261,7 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) return -EMSGSIZE; ap = nla_data(nla); - strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name)); + strscpy_pad(ap->alg_name, ealg->alg_name); ap->alg_key_len = ealg->alg_key_len; if (redact_secret && ealg->alg_key_len) @@ -1250,7 +1282,7 @@ static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb) 
return -EMSGSIZE; ap = nla_data(nla); - strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name)); + strscpy_pad(ap->alg_name, calg->alg_name); ap->alg_key_len = 0; return 0; @@ -3069,6 +3101,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, int n = 0; struct net *net = sock_net(skb->sk); struct xfrm_encap_tmpl *encap = NULL; + struct xfrm_user_offload *xuo = NULL; u32 if_id = 0; if (!attrs[XFRMA_MIGRATE]) { @@ -3099,11 +3132,19 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + if (attrs[XFRMA_OFFLOAD_DEV]) { + xuo = kmemdup(nla_data(attrs[XFRMA_OFFLOAD_DEV]), + sizeof(*xuo), GFP_KERNEL); + if (!xuo) { + err = -ENOMEM; + goto error; + } + } err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, - if_id, extack); - + if_id, extack, xuo); +error: kfree(encap); - + kfree(xuo); return err; } #else diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs index a59469c785e3..32ea43ece646 100644 --- a/rust/kernel/net/phy.rs +++ b/rust/kernel/net/phy.rs @@ -421,6 +421,7 @@ impl<T: Driver> Adapter<T> { /// `phydev` must be passed by the corresponding callback in `phy_driver`. unsafe extern "C" fn match_phy_device_callback( phydev: *mut bindings::phy_device, + _phydrv: *const bindings::phy_driver, ) -> crate::ffi::c_int { // SAFETY: This callback is called only in contexts // where we hold `phy_device->lock`, so the accessors on diff --git a/security/landlock/audit.c b/security/landlock/audit.c index 7e5e0ed0e4e5..c52d079cdb77 100644 --- a/security/landlock/audit.c +++ b/security/landlock/audit.c @@ -175,7 +175,7 @@ static void test_get_hierarchy(struct kunit *const test) KUNIT_EXPECT_EQ(test, 10, get_hierarchy(&dom2, 0)->id); KUNIT_EXPECT_EQ(test, 20, get_hierarchy(&dom2, 1)->id); KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, 2)->id); - KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, -1)->id); + /* KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, -1)->id); */ } #endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */ @@ -437,7 +437,7 @@ void landlock_log_denial(const struct landlock_cred_security *const subject, return; /* Checks if the current exec was restricting itself. */ - if (subject->domain_exec & (1 << youngest_layer)) { + if (subject->domain_exec & BIT(youngest_layer)) { /* Ignores denials for the same execution. */ if (!youngest_denied->log_same_exec) return; diff --git a/security/landlock/id.c b/security/landlock/id.c index 11fab9259c15..56f7cc0fc744 100644 --- a/security/landlock/id.c +++ b/security/landlock/id.c @@ -7,6 +7,7 @@ #include <kunit/test.h> #include <linux/atomic.h> +#include <linux/bitops.h> #include <linux/random.h> #include <linux/spinlock.h> @@ -25,7 +26,7 @@ static void __init init_id(atomic64_t *const counter, const u32 random_32bits) * Ensures sure 64-bit values are always used by user space (or may * fail with -EOVERFLOW), and makes this testable. */ - init = 1ULL << 32; + init = BIT_ULL(32); /* * Makes a large (2^32) boot-time value to limit ID collision in logs @@ -105,7 +106,7 @@ static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter, * to get a new ID (e.g. a full landlock_restrict_self() call), and the * cost of draining all available IDs during the system's uptime. */ - random_4bits = random_4bits % (1 << 4); + random_4bits &= 0b1111; step = number_of_ids + random_4bits; /* It is safe to cast a signed atomic to an unsigned value. 
*/ @@ -144,6 +145,19 @@ static void test_range1_rand1(struct kunit *const test) init + 2); } +static void test_range1_rand15(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 15), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 16); +} + static void test_range1_rand16(struct kunit *const test) { atomic64_t counter; @@ -196,6 +210,19 @@ static void test_range2_rand2(struct kunit *const test) init + 4); } +static void test_range2_rand15(struct kunit *const test) +{ + atomic64_t counter; + u64 init; + + init = get_random_u32(); + atomic64_set(&counter, init); + KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 15), init); + KUNIT_EXPECT_EQ( + test, get_id_range(get_random_u8(), &counter, get_random_u8()), + init + 17); +} + static void test_range2_rand16(struct kunit *const test) { atomic64_t counter; @@ -232,10 +259,12 @@ static struct kunit_case __refdata test_cases[] = { KUNIT_CASE(test_init_once), KUNIT_CASE(test_range1_rand0), KUNIT_CASE(test_range1_rand1), + KUNIT_CASE(test_range1_rand15), KUNIT_CASE(test_range1_rand16), KUNIT_CASE(test_range2_rand0), KUNIT_CASE(test_range2_rand1), KUNIT_CASE(test_range2_rand2), + KUNIT_CASE(test_range2_rand15), KUNIT_CASE(test_range2_rand16), {} /* clang-format on */ diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index b9561e3417ae..33eafb71e4f3 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -9,6 +9,7 @@ #include <asm/current.h> #include <linux/anon_inodes.h> +#include <linux/bitops.h> #include <linux/build_bug.h> #include <linux/capability.h> #include <linux/cleanup.h> @@ -563,7 +564,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, new_llcred->domain = new_dom; #ifdef CONFIG_AUDIT - new_llcred->domain_exec |= 1 << (new_dom->num_layers - 1); + new_llcred->domain_exec |= BIT(new_dom->num_layers - 1); #endif /* CONFIG_AUDIT */ return commit_creds(new_cred); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 4683b9139c56..4ecb17bd5436 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1074,8 +1074,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime); runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 6c2b6a62d9d2..853ac5bb33ff 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -723,6 +723,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); } +/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else diff --git 
a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 198c598a5393..880240924bfd 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -732,15 +732,21 @@ static int snd_seq_deliver_single_event(struct snd_seq_client *client, */ static int __deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, - struct snd_seq_client_port *src_port, - int atomic, int hop) + int port, int atomic, int hop) { + struct snd_seq_client_port *src_port; struct snd_seq_subscribers *subs; int err, result = 0, num_ev = 0; union __snd_seq_event event_saved; size_t saved_size; struct snd_seq_port_subs_info *grp; + if (port < 0) + return 0; + src_port = snd_seq_port_use_ptr(client, port); + if (!src_port) + return 0; + /* save original event record */ saved_size = snd_seq_event_packet_size(event); memcpy(&event_saved, event, saved_size); @@ -775,6 +781,7 @@ static int __deliver_to_subscribers(struct snd_seq_client *client, read_unlock(&grp->list_lock); else up_read(&grp->list_mutex); + snd_seq_port_unlock(src_port); memcpy(event, &event_saved, saved_size); return (result < 0) ? result : num_ev; } @@ -783,25 +790,32 @@ static int deliver_to_subscribers(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop) { - struct snd_seq_client_port *src_port; - int ret = 0, ret2; - - src_port = snd_seq_port_use_ptr(client, event->source.port); - if (src_port) { - ret = __deliver_to_subscribers(client, event, src_port, atomic, hop); - snd_seq_port_unlock(src_port); - } - - if (client->ump_endpoint_port < 0 || - event->source.port == client->ump_endpoint_port) - return ret; + int ret; +#if IS_ENABLED(CONFIG_SND_SEQ_UMP) + int ret2; +#endif - src_port = snd_seq_port_use_ptr(client, client->ump_endpoint_port); - if (!src_port) + ret = __deliver_to_subscribers(client, event, + event->source.port, atomic, hop); +#if IS_ENABLED(CONFIG_SND_SEQ_UMP) + if (!snd_seq_client_is_ump(client) || client->ump_endpoint_port < 0) return ret; - ret2 = __deliver_to_subscribers(client, event, src_port, atomic, hop); - snd_seq_port_unlock(src_port); - return ret2 < 0 ? ret2 : ret; + /* If it's an event from EP port (and with a UMP group), + * deliver to subscribers of the corresponding UMP group port, too. + * Or, if it's from non-EP port, deliver to subscribers of EP port, too. + */ + if (event->source.port == client->ump_endpoint_port) + ret2 = __deliver_to_subscribers(client, event, + snd_seq_ump_group_port(event), + atomic, hop); + else + ret2 = __deliver_to_subscribers(client, event, + client->ump_endpoint_port, + atomic, hop); + if (ret2 < 0) + return ret2; +#endif + return ret; } /* deliver an event to the destination port(s). 
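The deliver_to_subscribers() rework above makes a UMP event reach two groups of subscribers: those of its source port, plus those of the "other" port, which is the per-group port when the event arrived on the UMP Endpoint port, or the Endpoint port when it arrived on a group port. A small sketch of that decision, assuming the group-to-port mapping introduced in seq_ump_convert.c below (group N maps to port N + 1, groupless messages map to -1); the helper here is illustrative only, not kernel code:

/* Illustrative only: returns the extra port an event should also reach,
 * or -1 when there is no second delivery.
 */
static int second_delivery_port(int source_port, int ep_port, int group_port)
{
	if (ep_port < 0)
		return -1;		/* client has no UMP Endpoint port */
	if (source_port == ep_port)
		return group_port;	/* EP event: also to its group port */
	return ep_port;			/* group-port event: also to the EP port */
}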
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c index ff7e558b4d51..db2f169cae11 100644 --- a/sound/core/seq/seq_ump_convert.c +++ b/sound/core/seq/seq_ump_convert.c @@ -1285,3 +1285,21 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source, else return cvt_to_ump_midi1(dest, dest_port, event, atomic, hop); } + +/* return the UMP group-port number of the event; + * return -1 if groupless or non-UMP event + */ +int snd_seq_ump_group_port(const struct snd_seq_event *event) +{ + const struct snd_seq_ump_event *ump_ev = + (const struct snd_seq_ump_event *)event; + unsigned char type; + + if (!snd_seq_ev_is_ump(event)) + return -1; + type = ump_message_type(ump_ev->ump[0]); + if (ump_is_groupless_msg(type)) + return -1; + /* group-port number starts from 1 */ + return ump_message_group(ump_ev->ump[0]) + 1; +} diff --git a/sound/core/seq/seq_ump_convert.h b/sound/core/seq/seq_ump_convert.h index 6c146d803280..4abf0a7637d7 100644 --- a/sound/core/seq/seq_ump_convert.h +++ b/sound/core/seq/seq_ump_convert.h @@ -18,5 +18,6 @@ int snd_seq_deliver_to_ump(struct snd_seq_client *source, struct snd_seq_client_port *dest_port, struct snd_seq_event *event, int atomic, int hop); +int snd_seq_ump_group_port(const struct snd_seq_event *event); #endif /* __SEQ_UMP_CONVERT_H */ diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c index 8686adaf4531..d3511135f7d3 100644 --- a/sound/hda/intel-sdw-acpi.c +++ b/sound/hda/intel-sdw-acpi.c @@ -177,7 +177,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, * sdw_intel_startup() is required for creation of devices and bus * startup */ -int sdw_intel_acpi_scan(acpi_handle *parent_handle, +int sdw_intel_acpi_scan(acpi_handle parent_handle, struct sdw_intel_acpi_info *info) { acpi_status status; diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index c6c018b40c69..4e0693f0ab0f 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -1561,7 +1561,7 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct es1968 *chip = snd_pcm_substream_chip(substream); struct esschan *es; - int apu1, apu2; + int err, apu1, apu2; apu1 = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_CAPTURE); if (apu1 < 0) @@ -1605,7 +1605,9 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) runtime->hw = snd_es1968_capture; runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = calc_available_memory_size(chip) - 1024; /* keep MIXBUF size */ - snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); + err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); + if (err < 0) + return err; spin_lock_irq(&chip->substream_lock); list_add(&es->list, &chip->substream_list); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8a2b09e4a7d5..20ab1fb2195f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6830,7 +6830,10 @@ static void alc256_fixup_chromebook(struct hda_codec *codec, switch (action) { case HDA_FIXUP_ACT_PRE_PROBE: - spec->gen.suppress_auto_mute = 1; + if (codec->core.subsystem_id == 0x10280d76) + spec->gen.suppress_auto_mute = 0; + else + spec->gen.suppress_auto_mute = 1; spec->gen.suppress_auto_mic = 1; spec->en_3kpull_low = false; break; @@ -10882,9 +10885,12 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), 
SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A), + SND_PCI_QUIRK(0x103c, 0x8e1d, "HP ZBook X Gli 16 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3a, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e3b, "HP Agusta", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), @@ -11300,6 +11306,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C), diff --git a/sound/sh/Kconfig b/sound/sh/Kconfig index b75fbb3236a7..f5fa09d740b4 100644 --- a/sound/sh/Kconfig +++ b/sound/sh/Kconfig @@ -14,7 +14,7 @@ if SND_SUPERH config SND_AICA tristate "Dreamcast Yamaha AICA sound" - depends on SH_DREAMCAST + depends on SH_DREAMCAST && SH_DMA_API select SND_PCM select G2_DMA help diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 3033e2d3fe16..90e367586493 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -228,6 +228,7 @@ config SND_SOC_MT8188 config SND_SOC_MT8188_MT6359 tristate "ASoC Audio driver for MT8188 with MT6359 and I2S codecs" depends on SND_SOC_MT8188 && MTK_PMIC_WRAP + depends on SND_SOC_MT6359_ACCDET || !SND_SOC_MT6359_ACCDET depends on I2C select SND_SOC_MT6359 select SND_SOC_HDMI_CODEC diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c index b1be03011d7e..6492e1cefbfb 100644 --- a/sound/soc/sof/intel/hda-bus.c +++ b/sound/soc/sof/intel/hda-bus.c @@ -76,7 +76,7 @@ void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev) snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops); - if (chip && chip->hw_ip_version == SOF_INTEL_ACE_2_0) + if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0) bus->use_pio_for_commands = true; #else snd_hdac_ext_bus_init(bus, dev, NULL, NULL); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index b34e5fdf10f1..6a3932d90b43 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1049,7 +1049,21 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev, if (!*mach && codec_num <= 2) { bool tplg_fixup = false; - hda_mach = snd_soc_acpi_intel_hda_machines; + /* + * make a local copy of the match array since we might + * be modifying it + */ + hda_mach = devm_kmemdup_array(sdev->dev, + snd_soc_acpi_intel_hda_machines, + 2, /* we have one entry + sentinel in the array */ + sizeof(snd_soc_acpi_intel_hda_machines[0]), + 
GFP_KERNEL); + if (!hda_mach) { + dev_err(bus->dev, + "%s: failed to duplicate the HDA match table\n", + __func__); + return; + } dev_info(bus->dev, "using HDA machine driver %s now\n", hda_mach->drv_name); diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c index 576f407cd456..976a4794d610 100644 --- a/sound/soc/sof/ipc4-control.c +++ b/sound/soc/sof/ipc4-control.c @@ -531,6 +531,14 @@ static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol, return -EINVAL; } + /* Check header id */ + if (header.numid != SOF_CTRL_CMD_BINARY) { + dev_err_ratelimited(scomp->dev, + "Incorrect numid for bytes put %d\n", + header.numid); + return -EINVAL; + } + /* Verify the ABI header first */ if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr))) return -EFAULT; @@ -613,7 +621,8 @@ static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol, if (data_size > size) return -ENOSPC; - header.numid = scontrol->comp_id; + /* Set header id and length */ + header.numid = SOF_CTRL_CMD_BINARY; header.length = data_size; if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv))) diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 1a2841899ff5..c09b424ab863 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -798,7 +798,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm spcm->stream[stream].private = stream_priv; - if (!support_info) + /* Delay reporting is only supported on playback */ + if (!support_info || stream == SNDRV_PCM_STREAM_CAPTURE) continue; time_info = kzalloc(sizeof(*time_info), GFP_KERNEL); diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index dc9cb8324067..14aa8ecc4bc4 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -1063,7 +1063,7 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, struct snd_sof_dai *dai) { struct snd_soc_card *card = scomp->card; - struct snd_soc_pcm_runtime *rtd; + struct snd_soc_pcm_runtime *rtd, *full, *partial; struct snd_soc_dai *cpu_dai; int stream; int i; @@ -1080,12 +1080,22 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, else goto end; + full = NULL; + partial = NULL; list_for_each_entry(rtd, &card->rtd_list, list) { /* does stream match DAI link ? */ - if (!rtd->dai_link->stream_name || - !strstr(rtd->dai_link->stream_name, w->sname)) - continue; + if (rtd->dai_link->stream_name) { + if (!strcmp(rtd->dai_link->stream_name, w->sname)) { + full = rtd; + break; + } else if (strstr(rtd->dai_link->stream_name, w->sname)) { + partial = rtd; + } + } + } + rtd = full ? 
full : partial; + if (rtd) { for_each_rtd_cpu_dais(rtd, i, cpu_dai) { /* * Please create DAI widget in the right order diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 9112313a9dbc..dbbc9eb935a4 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2242,6 +2242,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */ QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ @@ -2250,6 +2252,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), + DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */ diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h index aa5016ff3d91..f333a0ac4ee4 100644 --- a/tools/include/uapi/asm-generic/socket.h +++ b/tools/include/uapi/asm-generic/socket.h @@ -145,6 +145,8 @@ #define SO_RCVPRIORITY 82 +#define SO_PASSRIGHTS 83 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps index e5a5cb1b2cff..90686e241157 100644 --- a/tools/net/ynl/Makefile.deps +++ b/tools/net/ynl/Makefile.deps @@ -35,7 +35,15 @@ CFLAGS_rt-addr:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \ $(call get_hdr_inc,__LINUX_IF_ADDR_H,if_addr.h) CFLAGS_rt-link:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \ $(call get_hdr_inc,_LINUX_IF_LINK_H,if_link.h) -CFLAGS_rt-neigh:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) +CFLAGS_rt-neigh:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \ + $(call get_hdr_inc,__LINUX_NEIGHBOUR_H,neighbour.h) CFLAGS_rt-route:=$(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) CFLAGS_rt-rule:=$(call get_hdr_inc,__LINUX_FIB_RULES_H,fib_rules.h) +CFLAGS_tc:= $(call get_hdr_inc,__LINUX_RTNETLINK_H,rtnetlink.h) \ + $(call get_hdr_inc,__LINUX_PKT_SCHED_H,pkt_sched.h) \ + $(call get_hdr_inc,__LINUX_PKT_CLS_H,pkt_cls.h) \ + $(call get_hdr_inc,_TC_CT_H,tc_act/tc_ct.h) \ + $(call get_hdr_inc,_TC_MIRRED_H,tc_act/tc_mirred.h) \ + $(call get_hdr_inc,_TC_SKBEDIT_H,tc_act/tc_skbedit.h) \ + $(call get_hdr_inc,_TC_TUNNEL_KEY_H,tc_act/tc_tunnel_key.h) CFLAGS_tcp_metrics:=$(call get_hdr_inc,_LINUX_TCP_METRICS_H,tcp_metrics.h) diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile index 9208feed28c1..86e1e4a959a7 100644 --- a/tools/net/ynl/generated/Makefile +++ b/tools/net/ynl/generated/Makefile @@ -23,7 +23,7 @@ TOOL_RST:=../pyynl/ynl_gen_rst.py SPECS_DIR:=../../../../Documentation/netlink/specs SPECS_PATHS=$(wildcard $(SPECS_DIR)/*.yaml) -GENS_UNSUP=conntrack nftables tc +GENS_UNSUP=conntrack nftables GENS=$(filter-out ${GENS_UNSUP},$(patsubst $(SPECS_DIR)/%.yaml,%,${SPECS_PATHS})) SRCS=$(patsubst %,%-user.c,${GENS}) HDRS=$(patsubst %,%-user.h,${GENS}) diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h index 416866f85820..824777d7e05e 100644 --- a/tools/net/ynl/lib/ynl-priv.h +++ 
b/tools/net/ynl/lib/ynl-priv.h @@ -213,11 +213,15 @@ static inline void *ynl_attr_data_end(const struct nlattr *attr) NLMSG_HDRLEN + fixed_hdr_sz); attr; \ (attr) = ynl_attr_next(ynl_nlmsg_end_addr(nlh), attr)) -#define ynl_attr_for_each_nested(attr, outer) \ +#define ynl_attr_for_each_nested_off(attr, outer, offset) \ for ((attr) = ynl_attr_first(outer, outer->nla_len, \ - sizeof(struct nlattr)); attr; \ + sizeof(struct nlattr) + offset); \ + attr; \ (attr) = ynl_attr_next(ynl_attr_data_end(outer), attr)) +#define ynl_attr_for_each_nested(attr, outer) \ + ynl_attr_for_each_nested_off(attr, outer, 0) + #define ynl_attr_for_each_payload(start, len, attr) \ for ((attr) = ynl_attr_first(start, len, 0); attr; \ (attr) = ynl_attr_next(start + len, attr)) diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py index 1f8cc34ab3f0..76032e01c2e7 100755 --- a/tools/net/ynl/pyynl/ynl_gen_c.py +++ b/tools/net/ynl/pyynl/ynl_gen_c.py @@ -685,7 +685,11 @@ class TypeNest(Type): f"{self.enum_name}, {at}{var}->{self.c_name})") def _attr_get(self, ri, var): - get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))", + pns = self.family.pure_nested_structs[self.nested_attrs] + args = ["&parg", "attr"] + for sel in pns.external_selectors(): + args.append(f'{var}->{sel.name}') + get_lines = [f"if ({self.nested_render_name}_parse({', '.join(args)}))", "return YNL_PARSE_CB_ERROR;"] init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;", f"parg.data = &{var}->{self.c_name};"] @@ -890,15 +894,24 @@ class TypeSubMessage(TypeNest): def _attr_typol(self): typol = f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, ' - typol += f'.is_submsg = 1, .selector_type = {self.attr_set[self["selector"]].value} ' + typol += '.is_submsg = 1, ' + # Reverse-parsing of the policy (ynl_err_walk() in ynl.c) does not + # support external selectors. No family uses sub-messages with external + # selector for requests so this is fine for now. 
+ if not self.selector.is_external(): + typol += f'.selector_type = {self.attr_set[self["selector"]].value} ' return typol def _attr_get(self, ri, var): sel = c_lower(self['selector']) - get_lines = [f'if (!{var}->{sel})', + if self.selector.is_external(): + sel_var = f"_sel_{sel}" + else: + sel_var = f"{var}->{sel}" + get_lines = [f'if (!{sel_var})', f'return ynl_submsg_failed(yarg, "%s", "%s");' % (self.name, self['selector']), - f"if ({self.nested_render_name}_parse(&parg, {var}->{sel}, attr))", + f"if ({self.nested_render_name}_parse(&parg, {sel_var}, attr))", "return YNL_PARSE_CB_ERROR;"] init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;", f"parg.data = &{var}->{self.c_name};"] @@ -914,11 +927,19 @@ class Selector: self.attr.is_selector = True self._external = False else: - raise Exception("Passing selectors from external nests not supported") + # The selector will need to get passed down thru the structs + self.attr = None + self._external = True + + def set_attr(self, attr): + self.attr = attr + + def is_external(self): + return self._external class Struct: - def __init__(self, family, space_name, type_list=None, + def __init__(self, family, space_name, type_list=None, fixed_header=None, inherited=None, submsg=None): self.family = family self.space_name = space_name @@ -926,6 +947,9 @@ class Struct: # Use list to catch comparisons with empty sets self._inherited = inherited if inherited is not None else [] self.inherited = [] + self.fixed_header = None + if fixed_header: + self.fixed_header = 'struct ' + c_lower(fixed_header) self.submsg = submsg self.nested = type_list is None @@ -976,6 +1000,13 @@ class Struct: raise Exception("Inheriting different members not supported") self.inherited = [c_lower(x) for x in sorted(self._inherited)] + def external_selectors(self): + sels = [] + for name, attr in self.attr_list: + if isinstance(attr, TypeSubMessage) and attr.selector.is_external(): + sels.append(attr.selector) + return sels + def free_needs_iter(self): for _, attr in self.attr_list: if attr.free_needs_iter(): @@ -1222,6 +1253,7 @@ class Family(SpecFamily): self._load_root_sets() self._load_nested_sets() self._load_attr_use() + self._load_selector_passing() self._load_hooks() self.kernel_policy = self.yaml.get('kernel-policy', 'split') @@ -1316,7 +1348,9 @@ class Family(SpecFamily): nested = spec['nested-attributes'] if nested not in self.root_sets: if nested not in self.pure_nested_structs: - self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit) + self.pure_nested_structs[nested] = \ + Struct(self, nested, inherited=inherit, + fixed_header=spec.get('fixed-header')) else: raise Exception(f'Using attr set as root and nested not supported - {nested}') @@ -1338,12 +1372,25 @@ class Family(SpecFamily): attrs = [] for name, fmt in submsg.formats.items(): - attrs.append({ + attr = { "name": name, - "type": "nest", "parent-sub-message": spec, - "nested-attributes": fmt['attribute-set'] - }) + } + if 'attribute-set' in fmt: + attr |= { + "type": "nest", + "nested-attributes": fmt['attribute-set'], + } + if 'fixed-header' in fmt: + attr |= { "fixed-header": fmt["fixed-header"] } + elif 'fixed-header' in fmt: + attr |= { + "type": "binary", + "struct": fmt["fixed-header"], + } + else: + attr["type"] = "flag" + attrs.append(attr) self.attr_sets[nested] = AttrSet(self, { "name": nested, @@ -1436,6 +1483,30 @@ class Family(SpecFamily): if attr in rs_members['reply']: spec.set_reply() + def _load_selector_passing(self): + def all_structs(): + for k, v in 
reversed(self.pure_nested_structs.items()): + yield k, v + for k, _ in self.root_sets.items(): + yield k, None # we don't have a struct, but it must be terminal + + for attr_set, struct in all_structs(): + for _, spec in self.attr_sets[attr_set].items(): + if 'nested-attributes' in spec: + child_name = spec['nested-attributes'] + elif 'sub-message' in spec: + child_name = spec.sub_message + else: + continue + + child = self.pure_nested_structs.get(child_name) + for selector in child.external_selectors(): + if selector.name in self.attr_sets[attr_set]: + sel_attr = self.attr_sets[attr_set][selector.name] + selector.set_attr(sel_attr) + else: + raise Exception("Passing selector thru more than one layer not supported") + def _load_global_policy(self): global_set = set() attr_set_name = None @@ -1485,13 +1556,12 @@ class RenderInfo: self.op_mode = op_mode self.op = op - self.fixed_hdr = None + fixed_hdr = op.fixed_header if op else None self.fixed_hdr_len = 'ys->family->hdr_len' if op and op.fixed_header: - self.fixed_hdr = 'struct ' + c_lower(op.fixed_header) if op.fixed_header != family.fixed_header: if family.is_classic(): - self.fixed_hdr_len = f"sizeof({self.fixed_hdr})" + self.fixed_hdr_len = f"sizeof(struct {c_lower(fixed_hdr)})" else: raise Exception(f"Per-op fixed header not supported, yet") @@ -1531,12 +1601,17 @@ class RenderInfo: type_list = [] if op_dir in op[op_mode]: type_list = op[op_mode][op_dir]['attributes'] - self.struct[op_dir] = Struct(family, self.attr_set, type_list=type_list) + self.struct[op_dir] = Struct(family, self.attr_set, + fixed_header=fixed_hdr, + type_list=type_list) if op_mode == 'event': - self.struct['reply'] = Struct(family, self.attr_set, type_list=op['event']['attributes']) + self.struct['reply'] = Struct(family, self.attr_set, + fixed_header=fixed_hdr, + type_list=op['event']['attributes']) def type_empty(self, key): - return len(self.struct[key].attr_list) == 0 and self.fixed_hdr is None + return len(self.struct[key].attr_list) == 0 and \ + self.struct['request'].fixed_header is None def needs_nlflags(self, direction): return self.op_mode == 'do' and direction == 'request' and self.family.is_classic() @@ -1859,8 +1934,11 @@ def put_typol_submsg(cw, struct): i = 0 for name, arg in struct.member_list(): - cw.p('[%d] = { .type = YNL_PT_SUBMSG, .name = "%s", .nest = &%s_nest, },' % - (i, name, arg.nested_render_name)) + nest = "" + if arg.type == 'nest': + nest = f" .nest = &{arg.nested_render_name}_nest," + cw.p('[%d] = { .type = YNL_PT_SUBMSG, .name = "%s",%s },' % + (i, name, nest)) i += 1 cw.block_end(line=';') @@ -1970,6 +2048,11 @@ def put_req_nested(ri, struct): if struct.submsg is None: local_vars.append('struct nlattr *nest;') init_lines.append("nest = ynl_attr_nest_start(nlh, attr_type);") + if struct.fixed_header: + local_vars.append('void *hdr;') + struct_sz = f'sizeof({struct.fixed_header})' + init_lines.append(f"hdr = ynl_nlmsg_put_extra_header(nlh, {struct_sz});") + init_lines.append(f"memcpy(hdr, &obj->_hdr, {struct_sz});") has_anest = False has_count = False @@ -2001,15 +2084,18 @@ def put_req_nested(ri, struct): def _multi_parse(ri, struct, init_lines, local_vars): + if struct.fixed_header: + local_vars += ['void *hdr;'] if struct.nested: - iter_line = "ynl_attr_for_each_nested(attr, nested)" + if struct.fixed_header: + iter_line = f"ynl_attr_for_each_nested_off(attr, nested, sizeof({struct.fixed_header}))" + else: + iter_line = "ynl_attr_for_each_nested(attr, nested)" else: - if ri.fixed_hdr: - local_vars += ['void *hdr;'] iter_line = 
"ynl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)" if ri.op.fixed_header != ri.family.fixed_header: if ri.family.is_classic(): - iter_line = f"ynl_attr_for_each(attr, nlh, sizeof({ri.fixed_hdr}))" + iter_line = f"ynl_attr_for_each(attr, nlh, sizeof({struct.fixed_header}))" else: raise Exception(f"Per-op fixed header not supported, yet") @@ -2051,12 +2137,14 @@ def _multi_parse(ri, struct, init_lines, local_vars): for arg in struct.inherited: ri.cw.p(f'dst->{arg} = {arg};') - if ri.fixed_hdr: - if ri.family.is_classic(): + if struct.fixed_header: + if struct.nested: + ri.cw.p('hdr = ynl_attr_data(nested);') + elif ri.family.is_classic(): ri.cw.p('hdr = ynl_nlmsg_data(nlh);') else: ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));') - ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({ri.fixed_hdr}));") + ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({struct.fixed_header}));") for anest in sorted(all_multi): aspec = struct[anest] ri.cw.p(f"if (dst->{aspec.c_name})") @@ -2152,12 +2240,16 @@ def parse_rsp_submsg(ri, struct): parse_rsp_nested_prototype(ri, struct, suffix='') var = 'dst' + local_vars = {'const struct nlattr *attr = nested;', + f'{struct.ptr_name}{var} = yarg->data;', + 'struct ynl_parse_arg parg;'} - ri.cw.block_start() - ri.cw.write_func_lvar(['const struct nlattr *attr = nested;', - f'{struct.ptr_name}{var} = yarg->data;', - 'struct ynl_parse_arg parg;']) + for _, arg in struct.member_list(): + _, _, l_vars = arg._attr_get(ri, var) + local_vars |= set(l_vars) if l_vars else set() + ri.cw.block_start() + ri.cw.write_func_lvar(list(local_vars)) ri.cw.p('parg.ys = yarg->ys;') ri.cw.nl() @@ -2168,7 +2260,7 @@ def parse_rsp_submsg(ri, struct): ri.cw.block_start(line=f'{kw} (!strcmp(sel, "{name}"))') get_lines, init_lines, _ = arg._attr_get(ri, var) - for line in init_lines: + for line in init_lines or []: ri.cw.p(line) for line in get_lines: ri.cw.p(line) @@ -2183,6 +2275,8 @@ def parse_rsp_submsg(ri, struct): def parse_rsp_nested_prototype(ri, struct, suffix=';'): func_args = ['struct ynl_parse_arg *yarg', 'const struct nlattr *nested'] + for sel in struct.external_selectors(): + func_args.append('const char *_sel_' + sel.name) if struct.submsg: func_args.insert(1, 'const char *sel') for arg in struct.inherited: @@ -2248,7 +2342,7 @@ def print_req(ri): ret_err = 'NULL' local_vars += [f'{type_name(ri, rdir(direction))} *rsp;'] - if ri.fixed_hdr: + if ri.struct["request"].fixed_header: local_vars += ['size_t hdr_len;', 'void *hdr;'] @@ -2272,7 +2366,7 @@ def print_req(ri): ri.cw.p(f"yrs.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;") ri.cw.nl() - if ri.fixed_hdr: + if ri.struct['request'].fixed_header: ri.cw.p("hdr_len = sizeof(req->_hdr);") ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);") ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);") @@ -2318,7 +2412,7 @@ def print_dump(ri): 'struct nlmsghdr *nlh;', 'int err;'] - if ri.fixed_hdr: + if ri.struct['request'].fixed_header: local_vars += ['size_t hdr_len;', 'void *hdr;'] @@ -2339,7 +2433,7 @@ def print_dump(ri): else: ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);") - if ri.fixed_hdr: + if ri.struct['request'].fixed_header: ri.cw.p("hdr_len = sizeof(req->_hdr);") ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);") ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);") @@ -2416,8 +2510,8 @@ def _print_type(ri, direction, struct): if ri.needs_nlflags(direction): ri.cw.p('__u16 _nlmsg_flags;') ri.cw.nl() - if ri.fixed_hdr: - ri.cw.p(ri.fixed_hdr + ' _hdr;') + if 
struct.fixed_header: + ri.cw.p(struct.fixed_header + ' _hdr;') ri.cw.nl() for type_filter in ['present', 'len', 'count']: diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore index b3ec3fb0929f..7f5fca7682d7 100644 --- a/tools/net/ynl/samples/.gitignore +++ b/tools/net/ynl/samples/.gitignore @@ -6,3 +6,4 @@ page-pool rt-addr rt-link rt-route +tc diff --git a/tools/net/ynl/samples/tc.c b/tools/net/ynl/samples/tc.c new file mode 100644 index 000000000000..0bfff0fdd792 --- /dev/null +++ b/tools/net/ynl/samples/tc.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include <string.h> + +#include <ynl.h> + +#include <net/if.h> + +#include "tc-user.h" + +static void tc_qdisc_print(struct tc_getqdisc_rsp *q) +{ + char ifname[IF_NAMESIZE]; + const char *name; + + name = if_indextoname(q->_hdr.tcm_ifindex, ifname); + if (name) + printf("%16s: ", name); + + if (q->_len.kind) { + printf("%s ", q->kind); + + if (q->options._present.fq_codel) { + struct tc_fq_codel_attrs *fq_codel; + struct tc_fq_codel_xstats *stats; + + fq_codel = &q->options.fq_codel; + stats = q->stats2.app.fq_codel; + + if (fq_codel->_present.limit) + printf("limit: %dp ", fq_codel->limit); + if (fq_codel->_present.target) + printf("target: %dms ", + (fq_codel->target + 500) / 1000); + if (q->stats2.app._len.fq_codel) + printf("new_flow_cnt: %d ", + stats->qdisc_stats.new_flow_count); + } + } + + printf("\n"); +} + +int main(int argc, char **argv) +{ + struct tc_getqdisc_req_dump *req; + struct tc_getqdisc_list *rsp; + struct ynl_error yerr; + struct ynl_sock *ys; + + ys = ynl_sock_create(&ynl_tc_family, &yerr); + if (!ys) { + fprintf(stderr, "YNL: %s\n", yerr.msg); + return 1; + } + + req = tc_getqdisc_req_dump_alloc(); + if (!req) + goto err_destroy; + + rsp = tc_getqdisc_dump(ys, req); + tc_getqdisc_req_dump_free(req); + if (!rsp) + goto err_close; + + if (ynl_dump_empty(rsp)) + fprintf(stderr, "Error: no addresses reported\n"); + ynl_dump_foreach(rsp, qdisc) + tc_qdisc_print(qdisc); + tc_getqdisc_list_free(rsp); + + ynl_sock_destroy(ys); + return 0; + +err_close: + fprintf(stderr, "YNL: %s\n", ys->err.msg); +err_destroy: + ynl_sock_destroy(ys); + return 2; +} diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index a0a6ba47d600..a62bd8e3a52e 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -66,6 +66,7 @@ TARGETS += mseal_system_mappings TARGETS += nci TARGETS += net TARGETS += net/af_unix +TARGETS += net/can TARGETS += net/forwarding TARGETS += net/hsr TARGETS += net/mptcp diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 0107a24b7522..d330b1511979 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -83,6 +83,14 @@ struct loop_ctx { struct sock *sk; }; +static bool sk_is_tcp(struct sock *sk) +{ + return (sk->__sk_common.skc_family == AF_INET || + sk->__sk_common.skc_family == AF_INET6) && + sk->sk_type == SOCK_STREAM && + sk->sk_protocol == IPPROTO_TCP; +} + static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, const struct sockopt_test *t, int level) @@ -91,6 +99,9 @@ static int bpf_test_sockopt_flip(void *ctx, struct sock *sk, opt = t->opt; + if (opt == SO_TXREHASH && !sk_is_tcp(sk)) + return 0; + if (bpf_getsockopt(ctx, level, opt, &old, sizeof(old))) return 1; /* kernel initialized txrehash to 255 */ diff --git 
a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py index 7fc686cf47a2..d7f6a76eb2b7 100755 --- a/tools/testing/selftests/drivers/net/hw/devmem.py +++ b/tools/testing/selftests/drivers/net/hw/devmem.py @@ -49,12 +49,27 @@ def check_tx(cfg) -> None: ksft_eq(socat.stdout.strip(), "hello\nworld") +@ksft_disruptive +def check_tx_chunks(cfg) -> None: + cfg.require_ipver("6") + require_devmem(cfg) + + port = rand_port() + listen_cmd = f"socat -U - TCP6-LISTEN:{port}" + + with bkg(listen_cmd, exit_wait=True) as socat: + wait_port_listen(port) + cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_remote} -f {cfg.ifname} -s {cfg.addr_v['6']} -p {port} -z 3", host=cfg.remote, shell=True) + + ksft_eq(socat.stdout.strip(), "hello\nworld") + + def main() -> None: with NetDrvEpEnv(__file__) as cfg: cfg.bin_local = path.abspath(path.dirname(__file__) + "/ncdevmem") cfg.bin_remote = cfg.remote.deploy(cfg.bin_local) - ksft_run([check_rx, check_tx], + ksft_run([check_rx, check_tx, check_tx_chunks], args=(cfg, )) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c index ca723722a810..fc7ba7d71502 100644 --- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c +++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c @@ -82,6 +82,9 @@ #define MSG_SOCK_DEVMEM 0x2000000 #endif +#define MAX_IOV 1024 + +static size_t max_chunk; static char *server_ip; static char *client_ip; static char *port; @@ -834,10 +837,10 @@ static int do_client(struct memory_buffer *mem) struct sockaddr_in6 server_sin; struct sockaddr_in6 client_sin; struct ynl_sock *ys = NULL; + struct iovec iov[MAX_IOV]; struct msghdr msg = {}; ssize_t line_size = 0; struct cmsghdr *cmsg; - struct iovec iov[2]; char *line = NULL; unsigned long mid; size_t len = 0; @@ -893,27 +896,29 @@ static int do_client(struct memory_buffer *mem) if (line_size < 0) break; - mid = (line_size / 2) + 1; - - iov[0].iov_base = (void *)1; - iov[0].iov_len = mid; - iov[1].iov_base = (void *)(mid + 2); - iov[1].iov_len = line_size - mid; + if (max_chunk) { + msg.msg_iovlen = + (line_size + max_chunk - 1) / max_chunk; + if (msg.msg_iovlen > MAX_IOV) + error(1, 0, + "can't partition %zd bytes into maximum of %d chunks", + line_size, MAX_IOV); - provider->memcpy_to_device(mem, (size_t)iov[0].iov_base, line, - iov[0].iov_len); - provider->memcpy_to_device(mem, (size_t)iov[1].iov_base, - line + iov[0].iov_len, - iov[1].iov_len); + for (int i = 0; i < msg.msg_iovlen; i++) { + iov[i].iov_base = (void *)(i * max_chunk); + iov[i].iov_len = max_chunk; + } - fprintf(stderr, - "read line_size=%ld iov[0].iov_base=%lu, iov[0].iov_len=%lu, iov[1].iov_base=%lu, iov[1].iov_len=%lu\n", - line_size, (unsigned long)iov[0].iov_base, - iov[0].iov_len, (unsigned long)iov[1].iov_base, - iov[1].iov_len); + iov[msg.msg_iovlen - 1].iov_len = + line_size - (msg.msg_iovlen - 1) * max_chunk; + } else { + iov[0].iov_base = 0; + iov[0].iov_len = line_size; + msg.msg_iovlen = 1; + } msg.msg_iov = iov; - msg.msg_iovlen = 2; + provider->memcpy_to_device(mem, 0, line, line_size); msg.msg_control = ctrl_data; msg.msg_controllen = sizeof(ctrl_data); @@ -934,7 +939,8 @@ static int do_client(struct memory_buffer *mem) fprintf(stderr, "sendmsg_ret=%d\n", ret); if (ret != line_size) - error(1, errno, "Did not send all bytes"); + error(1, errno, "Did not send all bytes %d vs %zd", ret, + line_size); wait_compl(socket_fd); } @@ -956,7 +962,7 @@ int main(int argc, char *argv[]) int is_server = 0, opt; int 
ret; - while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) { + while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) { switch (opt) { case 'l': is_server = 1; @@ -982,6 +988,9 @@ int main(int argc, char *argv[]) case 'f': ifname = optarg; break; + case 'z': + max_chunk = atoi(optarg); + break; case '?': fprintf(stderr, "unknown option: %c\n", optopt); break; diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile index 2d5a76d99181..eaf6938f100e 100644 --- a/tools/testing/selftests/drivers/net/team/Makefile +++ b/tools/testing/selftests/drivers/net/team/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for net selftests -TEST_PROGS := dev_addr_lists.sh +TEST_PROGS := dev_addr_lists.sh propagation.sh TEST_INCLUDES := \ ../bonding/lag_lib.sh \ diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config index b5e3a3aad4bf..636b3525b679 100644 --- a/tools/testing/selftests/drivers/net/team/config +++ b/tools/testing/selftests/drivers/net/team/config @@ -1,5 +1,6 @@ CONFIG_DUMMY=y CONFIG_IPV6=y CONFIG_MACVLAN=y +CONFIG_NETDEVSIM=m CONFIG_NET_TEAM=y CONFIG_NET_TEAM_MODE_LOADBALANCE=y diff --git a/tools/testing/selftests/drivers/net/team/propagation.sh b/tools/testing/selftests/drivers/net/team/propagation.sh new file mode 100755 index 000000000000..4bea75b79878 --- /dev/null +++ b/tools/testing/selftests/drivers/net/team/propagation.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +set -e + +NSIM_LRO_ID=$((256 + RANDOM % 256)) +NSIM_LRO_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_LRO_ID + +NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device +NSIM_DEV_SYS_DEL=/sys/bus/netdevsim/del_device + +cleanup() +{ + set +e + ip link del dummyteam &>/dev/null + ip link del team0 &>/dev/null + echo $NSIM_LRO_ID > $NSIM_DEV_SYS_DEL + modprobe -r netdevsim +} + +# Trigger LRO propagation to the lower. +# https://lore.kernel.org/netdev/aBvOpkIoxcr9PfDg@mini-arch/ +team_lro() +{ + # using netdevsim because it supports NETIF_F_LRO + NSIM_LRO_NAME=$(find $NSIM_LRO_SYS/net -maxdepth 1 -type d ! \ + -path $NSIM_LRO_SYS/net -exec basename {} \;) + + ip link add name team0 type team + ip link set $NSIM_LRO_NAME down + ip link set dev $NSIM_LRO_NAME master team0 + ip link set team0 up + ethtool -K team0 large-receive-offload off + + ip link del team0 +} + +# Trigger promisc propagation to the lower during IFLA_MASTER. +# https://lore.kernel.org/netdev/20250506032328.3003050-1-sdf@fomichev.me/ +team_promisc() +{ + ip link add name dummyteam type dummy + ip link add name team0 type team + ip link set dummyteam down + ip link set team0 promisc on + ip link set dev dummyteam master team0 + ip link set team0 up + + ip link del team0 + ip link del dummyteam +} + +# Trigger promisc propagation to the lower via netif_change_flags (aka +# ndo_change_rx_flags). +# https://lore.kernel.org/netdev/20250514220319.3505158-1-stfomichev@gmail.com/ +team_change_flags() +{ + ip link add name dummyteam type dummy + ip link add name team0 type team + ip link set dummyteam down + ip link set dev dummyteam master team0 + ip link set team0 up + ip link set team0 promisc on + + # Make sure we can add more L2 addresses without any issues. 
+ ip link add link team0 address 00:00:00:00:00:01 team0.1 type macvlan + ip link set team0.1 up + + ip link del team0.1 + ip link del team0 + ip link del dummyteam +} + +trap cleanup EXIT +modprobe netdevsim || : +echo $NSIM_LRO_ID > $NSIM_DEV_SYS_NEW +udevadm settle +team_lro +team_promisc +team_change_flags diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c index 1562aa7d60b0..6dec59d64083 100644 --- a/tools/testing/selftests/nci/nci_dev.c +++ b/tools/testing/selftests/nci/nci_dev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2021 Samsung Electrnoics + * Copyright (C) 2021 Samsung Electronics * Bongsu Jeon <bongsu.jeon@samsung.com> * * Test code for nci diff --git a/tools/testing/selftests/net/af_unix/scm_rights.c b/tools/testing/selftests/net/af_unix/scm_rights.c index d66336256580..8b015f16c03d 100644 --- a/tools/testing/selftests/net/af_unix/scm_rights.c +++ b/tools/testing/selftests/net/af_unix/scm_rights.c @@ -23,6 +23,7 @@ FIXTURE_VARIANT(scm_rights) int type; int flags; bool test_listener; + bool disabled; }; FIXTURE_VARIANT_ADD(scm_rights, dgram) @@ -31,6 +32,16 @@ FIXTURE_VARIANT_ADD(scm_rights, dgram) .type = SOCK_DGRAM, .flags = 0, .test_listener = false, + .disabled = false, +}; + +FIXTURE_VARIANT_ADD(scm_rights, dgram_disabled) +{ + .name = "UNIX ", + .type = SOCK_DGRAM, + .flags = 0, + .test_listener = false, + .disabled = true, }; FIXTURE_VARIANT_ADD(scm_rights, stream) @@ -39,6 +50,16 @@ FIXTURE_VARIANT_ADD(scm_rights, stream) .type = SOCK_STREAM, .flags = 0, .test_listener = false, + .disabled = false, +}; + +FIXTURE_VARIANT_ADD(scm_rights, stream_disabled) +{ + .name = "UNIX-STREAM ", + .type = SOCK_STREAM, + .flags = 0, + .test_listener = false, + .disabled = true, }; FIXTURE_VARIANT_ADD(scm_rights, stream_oob) @@ -47,6 +68,16 @@ FIXTURE_VARIANT_ADD(scm_rights, stream_oob) .type = SOCK_STREAM, .flags = MSG_OOB, .test_listener = false, + .disabled = false, +}; + +FIXTURE_VARIANT_ADD(scm_rights, stream_oob_disabled) +{ + .name = "UNIX-STREAM ", + .type = SOCK_STREAM, + .flags = MSG_OOB, + .test_listener = false, + .disabled = true, }; FIXTURE_VARIANT_ADD(scm_rights, stream_listener) @@ -55,6 +86,16 @@ FIXTURE_VARIANT_ADD(scm_rights, stream_listener) .type = SOCK_STREAM, .flags = 0, .test_listener = true, + .disabled = false, +}; + +FIXTURE_VARIANT_ADD(scm_rights, stream_listener_disabled) +{ + .name = "UNIX-STREAM ", + .type = SOCK_STREAM, + .flags = 0, + .test_listener = true, + .disabled = true, }; FIXTURE_VARIANT_ADD(scm_rights, stream_listener_oob) @@ -63,6 +104,16 @@ FIXTURE_VARIANT_ADD(scm_rights, stream_listener_oob) .type = SOCK_STREAM, .flags = MSG_OOB, .test_listener = true, + .disabled = false, +}; + +FIXTURE_VARIANT_ADD(scm_rights, stream_listener_oob_disabled) +{ + .name = "UNIX-STREAM ", + .type = SOCK_STREAM, + .flags = MSG_OOB, + .test_listener = true, + .disabled = true, }; static int count_sockets(struct __test_metadata *_metadata, @@ -105,6 +156,9 @@ FIXTURE_SETUP(scm_rights) ret = unshare(CLONE_NEWNET); ASSERT_EQ(0, ret); + if (variant->disabled) + return; + ret = count_sockets(_metadata, variant); ASSERT_EQ(0, ret); } @@ -113,6 +167,9 @@ FIXTURE_TEARDOWN(scm_rights) { int ret; + if (variant->disabled) + return; + sleep(1); ret = count_sockets(_metadata, variant); @@ -121,6 +178,7 @@ FIXTURE_TEARDOWN(scm_rights) static void create_listeners(struct __test_metadata *_metadata, FIXTURE_DATA(scm_rights) *self, + const FIXTURE_VARIANT(scm_rights) *variant, int n) { struct 
sockaddr_un addr = { @@ -140,6 +198,12 @@ static void create_listeners(struct __test_metadata *_metadata, ret = listen(self->fd[i], -1); ASSERT_EQ(0, ret); + if (variant->disabled) { + ret = setsockopt(self->fd[i], SOL_SOCKET, SO_PASSRIGHTS, + &(int){0}, sizeof(int)); + ASSERT_EQ(0, ret); + } + addrlen = sizeof(addr); ret = getsockname(self->fd[i], (struct sockaddr *)&addr, &addrlen); ASSERT_EQ(0, ret); @@ -164,6 +228,12 @@ static void create_socketpairs(struct __test_metadata *_metadata, for (i = 0; i < n * 2; i += 2) { ret = socketpair(AF_UNIX, variant->type, 0, self->fd + i); ASSERT_EQ(0, ret); + + if (variant->disabled) { + ret = setsockopt(self->fd[i], SOL_SOCKET, SO_PASSRIGHTS, + &(int){0}, sizeof(int)); + ASSERT_EQ(0, ret); + } } } @@ -175,7 +245,7 @@ static void __create_sockets(struct __test_metadata *_metadata, ASSERT_LE(n * 2, sizeof(self->fd) / sizeof(self->fd[0])); if (variant->test_listener) - create_listeners(_metadata, self, n); + create_listeners(_metadata, self, variant, n); else create_socketpairs(_metadata, self, variant, n); } @@ -230,7 +300,13 @@ void __send_fd(struct __test_metadata *_metadata, int ret; ret = sendmsg(self->fd[receiver * 2 + 1], &msg, variant->flags); - ASSERT_EQ(MSGLEN, ret); + + if (variant->disabled) { + ASSERT_EQ(-1, ret); + ASSERT_EQ(-EPERM, -errno); + } else { + ASSERT_EQ(MSGLEN, ret); + } } #define create_sockets(n) \ diff --git a/tools/testing/selftests/net/can/.gitignore b/tools/testing/selftests/net/can/.gitignore new file mode 100644 index 000000000000..764a53fc837f --- /dev/null +++ b/tools/testing/selftests/net/can/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +test_raw_filter diff --git a/tools/testing/selftests/net/can/Makefile b/tools/testing/selftests/net/can/Makefile new file mode 100644 index 000000000000..5b82e60a03e7 --- /dev/null +++ b/tools/testing/selftests/net/can/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +top_srcdir = ../../../../.. + +CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES) + +TEST_PROGS := test_raw_filter.sh + +TEST_GEN_FILES := test_raw_filter + +include ../../lib.mk diff --git a/tools/testing/selftests/net/can/test_raw_filter.c b/tools/testing/selftests/net/can/test_raw_filter.c new file mode 100644 index 000000000000..4101c36390fd --- /dev/null +++ b/tools/testing/selftests/net/can/test_raw_filter.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* + * Copyright (c) 2011 Volkswagen Group Electronic Research + * All rights reserved. 
+ */ + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <string.h> + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <sys/time.h> +#include <net/if.h> +#include <linux/if.h> + +#include <linux/can.h> +#include <linux/can/raw.h> + +#include "../../kselftest_harness.h" + +#define ID 0x123 + +char CANIF[IFNAMSIZ]; + +static int send_can_frames(int sock, int testcase) +{ + struct can_frame frame; + + frame.can_dlc = 1; + frame.data[0] = testcase; + + frame.can_id = ID; + if (write(sock, &frame, sizeof(frame)) < 0) + goto write_err; + + frame.can_id = (ID | CAN_RTR_FLAG); + if (write(sock, &frame, sizeof(frame)) < 0) + goto write_err; + + frame.can_id = (ID | CAN_EFF_FLAG); + if (write(sock, &frame, sizeof(frame)) < 0) + goto write_err; + + frame.can_id = (ID | CAN_EFF_FLAG | CAN_RTR_FLAG); + if (write(sock, &frame, sizeof(frame)) < 0) + goto write_err; + + return 0; + +write_err: + perror("write"); + return 1; +} + +FIXTURE(can_filters) { + int sock; +}; + +FIXTURE_SETUP(can_filters) +{ + struct sockaddr_can addr; + struct ifreq ifr; + int recv_own_msgs = 1; + int s, ret; + + s = socket(PF_CAN, SOCK_RAW, CAN_RAW); + ASSERT_GE(s, 0) + TH_LOG("failed to create CAN_RAW socket: %d", errno); + + strncpy(ifr.ifr_name, CANIF, sizeof(ifr.ifr_name)); + ret = ioctl(s, SIOCGIFINDEX, &ifr); + ASSERT_GE(ret, 0) + TH_LOG("failed SIOCGIFINDEX: %d", errno); + + addr.can_family = AF_CAN; + addr.can_ifindex = ifr.ifr_ifindex; + + setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, + &recv_own_msgs, sizeof(recv_own_msgs)); + + ret = bind(s, (struct sockaddr *)&addr, sizeof(addr)); + ASSERT_EQ(ret, 0) + TH_LOG("failed bind socket: %d", errno); + + self->sock = s; +} + +FIXTURE_TEARDOWN(can_filters) +{ + close(self->sock); +} + +FIXTURE_VARIANT(can_filters) { + int testcase; + canid_t id; + canid_t mask; + int exp_num_rx; + canid_t exp_flags[]; +}; + +/* Receive all frames when filtering for the ID in standard frame format */ +FIXTURE_VARIANT_ADD(can_filters, base) { + .testcase = 1, + .id = ID, + .mask = CAN_SFF_MASK, + .exp_num_rx = 4, + .exp_flags = { + 0, + CAN_RTR_FLAG, + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Ignore EFF flag in filter ID if not covered by filter mask */ +FIXTURE_VARIANT_ADD(can_filters, base_eff) { + .testcase = 2, + .id = ID | CAN_EFF_FLAG, + .mask = CAN_SFF_MASK, + .exp_num_rx = 4, + .exp_flags = { + 0, + CAN_RTR_FLAG, + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Ignore RTR flag in filter ID if not covered by filter mask */ +FIXTURE_VARIANT_ADD(can_filters, base_rtr) { + .testcase = 3, + .id = ID | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK, + .exp_num_rx = 4, + .exp_flags = { + 0, + CAN_RTR_FLAG, + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Ignore EFF and RTR flags in filter ID if not covered by filter mask */ +FIXTURE_VARIANT_ADD(can_filters, base_effrtr) { + .testcase = 4, + .id = ID | CAN_EFF_FLAG | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK, + .exp_num_rx = 4, + .exp_flags = { + 0, + CAN_RTR_FLAG, + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive only SFF frames when expecting no EFF flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_eff) { + .testcase = 5, + .id = ID, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG, + .exp_num_rx = 2, + .exp_flags = { + 0, + CAN_RTR_FLAG, + }, +}; + +/* Receive only EFF frames when filter id and filter mask include EFF flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_eff_eff) { + .testcase = 6, + .id = ID | CAN_EFF_FLAG, + .mask = CAN_SFF_MASK | 
CAN_EFF_FLAG, + .exp_num_rx = 2, + .exp_flags = { + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive only SFF frames when expecting no EFF flag, ignoring RTR flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_eff_rtr) { + .testcase = 7, + .id = ID | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG, + .exp_num_rx = 2, + .exp_flags = { + 0, + CAN_RTR_FLAG, + }, +}; + +/* Receive only EFF frames when filter id and filter mask include EFF flag, + * ignoring RTR flag + */ +FIXTURE_VARIANT_ADD(can_filters, filter_eff_effrtr) { + .testcase = 8, + .id = ID | CAN_EFF_FLAG | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG, + .exp_num_rx = 2, + .exp_flags = { + CAN_EFF_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive no remote frames when filtering for no RTR flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_rtr) { + .testcase = 9, + .id = ID, + .mask = CAN_SFF_MASK | CAN_RTR_FLAG, + .exp_num_rx = 2, + .exp_flags = { + 0, + CAN_EFF_FLAG, + }, +}; + +/* Receive no remote frames when filtering for no RTR flag, ignoring EFF flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_rtr_eff) { + .testcase = 10, + .id = ID | CAN_EFF_FLAG, + .mask = CAN_SFF_MASK | CAN_RTR_FLAG, + .exp_num_rx = 2, + .exp_flags = { + 0, + CAN_EFF_FLAG, + }, +}; + +/* Receive only remote frames when filter includes RTR flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_rtr_rtr) { + .testcase = 11, + .id = ID | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_RTR_FLAG, + .exp_num_rx = 2, + .exp_flags = { + CAN_RTR_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive only remote frames when filter includes RTR flag, ignoring EFF + * flag + */ +FIXTURE_VARIANT_ADD(can_filters, filter_rtr_effrtr) { + .testcase = 12, + .id = ID | CAN_EFF_FLAG | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_RTR_FLAG, + .exp_num_rx = 2, + .exp_flags = { + CAN_RTR_FLAG, + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive only SFF data frame when filtering for no flags */ +FIXTURE_VARIANT_ADD(can_filters, filter_effrtr) { + .testcase = 13, + .id = ID, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + 0, + }, +}; + +/* Receive only EFF data frame when filtering for EFF but no RTR flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_effrtr_eff) { + .testcase = 14, + .id = ID | CAN_EFF_FLAG, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + CAN_EFF_FLAG, + }, +}; + +/* Receive only SFF remote frame when filtering for RTR but no EFF flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_effrtr_rtr) { + .testcase = 15, + .id = ID | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + CAN_RTR_FLAG, + }, +}; + +/* Receive only EFF remote frame when filtering for EFF and RTR flag */ +FIXTURE_VARIANT_ADD(can_filters, filter_effrtr_effrtr) { + .testcase = 16, + .id = ID | CAN_EFF_FLAG | CAN_RTR_FLAG, + .mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + CAN_EFF_FLAG | CAN_RTR_FLAG, + }, +}; + +/* Receive only SFF data frame when filtering for no EFF flag and no RTR flag + * but based on EFF mask + */ +FIXTURE_VARIANT_ADD(can_filters, eff) { + .testcase = 17, + .id = ID, + .mask = CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + 0, + }, +}; + +/* Receive only EFF data frame when filtering for EFF flag and no RTR flag but + * based on EFF mask + */ +FIXTURE_VARIANT_ADD(can_filters, eff_eff) { + .testcase = 18, + .id = ID | CAN_EFF_FLAG, + .mask = 
CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, + .exp_num_rx = 1, + .exp_flags = { + CAN_EFF_FLAG, + }, +}; + +/* This test verifies that the raw CAN filters work, by checking if only frames + * with the expected set of flags are received. For each test case, the given + * filter (id and mask) is added and four CAN frames are sent with every + * combination of set/unset EFF/RTR flags. + */ +TEST_F(can_filters, test_filter) +{ + struct can_filter rfilter; + int ret; + + rfilter.can_id = variant->id; + rfilter.can_mask = variant->mask; + setsockopt(self->sock, SOL_CAN_RAW, CAN_RAW_FILTER, + &rfilter, sizeof(rfilter)); + + TH_LOG("filters: can_id = 0x%08X can_mask = 0x%08X", + rfilter.can_id, rfilter.can_mask); + + ret = send_can_frames(self->sock, variant->testcase); + ASSERT_EQ(ret, 0) + TH_LOG("failed to send CAN frames"); + + for (int i = 0; i <= variant->exp_num_rx; i++) { + struct can_frame frame; + struct timeval tv = { + .tv_sec = 0, + .tv_usec = 50000, /* 50ms timeout */ + }; + fd_set rdfs; + + FD_ZERO(&rdfs); + FD_SET(self->sock, &rdfs); + + ret = select(self->sock + 1, &rdfs, NULL, NULL, &tv); + ASSERT_GE(ret, 0) + TH_LOG("failed select for frame %d, err: %d)", i, errno); + + ret = FD_ISSET(self->sock, &rdfs); + if (i == variant->exp_num_rx) { + ASSERT_EQ(ret, 0) + TH_LOG("too many frames received"); + } else { + ASSERT_NE(ret, 0) + TH_LOG("too few frames received"); + + ret = read(self->sock, &frame, sizeof(frame)); + ASSERT_GE(ret, 0) + TH_LOG("failed to read frame %d, err: %d", i, errno); + + TH_LOG("rx: can_id = 0x%08X rx = %d", frame.can_id, i); + + ASSERT_EQ(ID, frame.can_id & CAN_SFF_MASK) + TH_LOG("received wrong can_id"); + ASSERT_EQ(variant->testcase, frame.data[0]) + TH_LOG("received wrong test case"); + + ASSERT_EQ(frame.can_id & ~CAN_ERR_MASK, + variant->exp_flags[i]) + TH_LOG("received unexpected flags"); + } + } +} + +int main(int argc, char **argv) +{ + char *ifname = getenv("CANIF"); + + if (!ifname) { + printf("CANIF environment variable must contain the test interface\n"); + return KSFT_FAIL; + } + + strncpy(CANIF, ifname, sizeof(CANIF) - 1); + + return test_harness_run(argc, argv); +} diff --git a/tools/testing/selftests/net/can/test_raw_filter.sh b/tools/testing/selftests/net/can/test_raw_filter.sh new file mode 100755 index 000000000000..276d6c06ac95 --- /dev/null +++ b/tools/testing/selftests/net/can/test_raw_filter.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + test_raw_filter +" + +net_dir=$(dirname $0)/.. +source $net_dir/lib.sh + +export CANIF=${CANIF:-"vcan0"} +BITRATE=${BITRATE:-500000} + +setup() +{ + if [[ $CANIF == vcan* ]]; then + ip link add name $CANIF type vcan || exit $ksft_skip + else + ip link set dev $CANIF type can bitrate $BITRATE || exit $ksft_skip + fi + ip link set dev $CANIF up + pwd +} + +cleanup() +{ + ip link set dev $CANIF down + if [[ $CANIF == vcan* ]]; then + ip link delete $CANIF + fi +} + +test_raw_filter() +{ + ./test_raw_filter + check_err $? + log_test "test_raw_filter" +} + +trap cleanup EXIT +setup + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/srv6_end_flavors_test.sh b/tools/testing/selftests/net/srv6_end_flavors_test.sh index 50563443a4ad..318487eda671 100755 --- a/tools/testing/selftests/net/srv6_end_flavors_test.sh +++ b/tools/testing/selftests/net/srv6_end_flavors_test.sh @@ -399,7 +399,7 @@ __get_srv6_rtcfg_id() # Given the description of a router <id:op> as an input, the function returns # the <op> token which represents the operation (e.g. 
End behavior with or -# withouth flavors) configured for the node. +# without flavors) configured for the node. # Note that when the operation represents an End behavior with a list of # flavors, the output is the ordered version of that list. @@ -480,7 +480,7 @@ setup_rt_local_sids() # all SIDs start with a common locator. Routes and SRv6 Endpoint - # behavior instaces are grouped together in the 'localsid' table. + # behavior instances are grouped together in the 'localsid' table. ip -netns "${nsname}" -6 rule \ add to "${LOCATOR_SERVICE}::/16" \ lookup "${LOCALSID_TABLE_ID}" prio 999 diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json index a951c0d33cd2..ddc97ecd8b39 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json @@ -573,5 +573,32 @@ "teardown": [ "$TC qdisc del dev $DEV1 handle 1: root" ] + }, + { + "id": "831d", + "name": "Test HFSC qlen accounting with DRR/NETEM/BLACKHOLE chain", + "category": ["qdisc", "hfsc", "drr", "netem", "blackhole"], + "plugins": { "requires": ["nsPlugin", "scapyPlugin"] }, + "setup": [ + "$IP link set dev $DEV1 up || true", + "$TC qdisc add dev $DEV1 root handle 1: drr", + "$TC filter add dev $DEV1 parent 1: basic classid 1:1", + "$TC class add dev $DEV1 parent 1: classid 1:1 drr", + "$TC qdisc add dev $DEV1 parent 1:1 handle 2: hfsc def 1", + "$TC class add dev $DEV1 parent 2: classid 2:1 hfsc rt m1 8 d 1 m2 0", + "$TC qdisc add dev $DEV1 parent 2:1 handle 3: netem", + "$TC qdisc add dev $DEV1 parent 3:1 handle 4: blackhole" + ], + "scapy": { + "iface": "$DEV0", + "count": 5, + "packet": "Ether()/IP(dst='10.10.10.1', src='10.10.10.10')/ICMP()" + }, + "cmdUnderTest": "$TC -s qdisc show dev $DEV1", + "expExitCode": "0", + "verifyCmd": "$TC -s qdisc show dev $DEV1", + "matchPattern": "qdisc hfsc", + "matchCount": "1", + "teardown": ["$TC qdisc del dev $DEV1 root handle 1: drr"] } ] |
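
Note on the ncdevmem "-z" chunking above: the client splits each read line into ceil(line_size / max_chunk) iovec entries whose iov_base fields carry byte offsets into the device-memory buffer rather than pointers, with the final entry holding the remainder. The following is a minimal standalone sketch of that partitioning arithmetic only, assuming nothing beyond standard C; the helper name split_into_iovs and the standalone main() are illustrative and not part of the selftest.

#include <stdio.h>
#include <sys/uio.h>

#define MAX_IOV 1024	/* same cap as the selftest applies */

/*
 * Partition line_size bytes into chunks of at most max_chunk bytes.
 * iov_base holds the byte offset into the buffer (not a pointer), mirroring
 * how the TX devmem path above describes fragments.
 * Returns the number of iovecs used, or -1 on bad input / overflow of MAX_IOV.
 */
static int split_into_iovs(struct iovec *iov, size_t line_size, size_t max_chunk)
{
	size_t i, n;

	if (!line_size || !max_chunk)
		return -1;

	n = (line_size + max_chunk - 1) / max_chunk;
	if (n > MAX_IOV)
		return -1;

	for (i = 0; i < n; i++) {
		iov[i].iov_base = (void *)(i * max_chunk);
		iov[i].iov_len = max_chunk;
	}
	/* last chunk carries whatever is left over */
	iov[n - 1].iov_len = line_size - (n - 1) * max_chunk;

	return (int)n;
}

int main(void)
{
	struct iovec iov[MAX_IOV];
	int n = split_into_iovs(iov, 10, 3);	/* 10 bytes in 3-byte chunks */

	for (int i = 0; i < n; i++)
		printf("iov[%d]: off=%zu len=%zu\n", i,
		       (size_t)iov[i].iov_base, iov[i].iov_len);
	return 0;
}

With max_chunk of 0 the selftest keeps its old single-iovec path; the sketch simply rejects that case instead.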
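
Note on the scm_rights.c "disabled" variants above: clearing SO_PASSRIGHTS on the receiving AF_UNIX socket makes a later sendmsg() that carries SCM_RIGHTS fail with EPERM. Below is a minimal sketch of that interaction over a socketpair, assuming SO_PASSRIGHTS is exposed by the installed uapi headers; the fallback define value is an assumption and should be checked against asm-generic/socket.h.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_PASSRIGHTS
#define SO_PASSRIGHTS 83	/* assumed value; verify against your headers */
#endif

int main(void)
{
	char payload = 'x';
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	int sk[2], zero = 0, fd_to_pass;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk))
		return 1;

	/* receiver opts out of SCM_RIGHTS delivery */
	if (setsockopt(sk[0], SOL_SOCKET, SO_PASSRIGHTS, &zero, sizeof(zero)))
		perror("setsockopt(SO_PASSRIGHTS)");

	fd_to_pass = dup(0);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	/* expected to fail with EPERM once the peer disabled SO_PASSRIGHTS */
	if (sendmsg(sk[1], &msg, 0) < 0)
		printf("sendmsg: %s\n", strerror(errno));

	close(fd_to_pass);
	close(sk[0]);
	close(sk[1]);
	return 0;
}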
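
Note on the CAN raw filter test above: a single {can_id, can_mask} filter accepts a frame when (frame.can_id & can_mask) == (can_id & can_mask), so the EFF and RTR flag bits only take part in the comparison when they are set in the mask. The sketch below installs one such filter matching the "filter_effrtr" case (SFF data frames with ID 0x123 only) and then reads matching frames; the interface name argument (e.g. vcan0) and the standalone receive loop are illustrative, not part of the selftest.

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

#include <linux/can.h>
#include <linux/can/raw.h>

int main(int argc, char **argv)
{
	struct can_filter rfilter = {
		/* ID 0x123 data frames only: EFF and RTR must both be clear */
		.can_id = 0x123,
		.can_mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
	};
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr = { 0 };
	int s;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <can-ifname>\n", argv[0]);
		return 1;
	}

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return 1;

	strncpy(ifr.ifr_name, argv[1], sizeof(ifr.ifr_name) - 1);
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
		return 1;
	addr.can_ifindex = ifr.ifr_ifindex;

	/* only frames passing the filter are queued on this socket */
	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter)))
		return 1;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)))
		return 1;

	for (;;) {
		struct can_frame frame;

		if (read(s, &frame, sizeof(frame)) < 0)
			break;
		printf("can_id 0x%08X dlc %d\n", frame.can_id, frame.can_dlc);
	}

	close(s);
	return 0;
}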