-rw-r--r--Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml127
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/networking/index.rst1
-rw-r--r--Documentation/networking/mptcp-sysctl.rst26
-rw-r--r--MAINTAINERS1
-rw-r--r--drivers/bus/mhi/core/main.c11
-rw-r--r--drivers/crypto/caam/qi.c15
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_main.c11
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/hirschmann/Kconfig9
-rw-r--r--drivers/net/dsa/hirschmann/Makefile5
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c1339
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h286
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c479
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h58
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c452
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.h76
-rw-r--r--drivers/net/dsa/mt7530.c49
-rw-r--r--drivers/net/dsa/mt7530.h12
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h1
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c28
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c51
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h47
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c147
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h11
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c9
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/fddi/skfp/drvfbi.c4
-rw-r--r--drivers/net/fddi/skfp/ecm.c7
-rw-r--r--drivers/net/fddi/skfp/ess.c1
-rw-r--r--drivers/net/fddi/skfp/hwt.c4
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c4
-rw-r--r--drivers/net/fddi/skfp/queue.c4
-rw-r--r--drivers/net/fddi/skfp/rmt.c4
-rw-r--r--drivers/net/fddi/skfp/smtdef.c4
-rw-r--r--drivers/net/fddi/skfp/smtinit.c4
-rw-r--r--drivers/net/fddi/skfp/smttimer.c4
-rw-r--r--drivers/net/fddi/skfp/srf.c5
-rw-r--r--drivers/net/ieee802154/ca8210.c22
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipa/gsi.c52
-rw-r--r--drivers/net/ipa/gsi.h24
-rw-r--r--drivers/net/ipa/ipa_endpoint.c14
-rw-r--r--drivers/net/ipa/ipa_main.c14
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mhi_net.c316
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netconsole.c1
-rw-r--r--drivers/net/phy/adin.c150
-rw-r--r--drivers/net/phy/aquantia_main.c59
-rw-r--r--drivers/net/phy/at803x.c50
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c49
-rw-r--r--drivers/net/phy/bcm-phy-lib.h1
-rw-r--r--drivers/net/phy/bcm54140.c46
-rw-r--r--drivers/net/phy/bcm63xx.c20
-rw-r--r--drivers/net/phy/bcm87xx.c50
-rw-r--r--drivers/net/phy/broadcom.c70
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c63
-rw-r--r--drivers/net/phy/mscc/mscc_main.c70
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/phy_device.c23
-rw-r--r--drivers/net/phy/realtek.c180
-rw-r--r--drivers/net/team/team.c9
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/lan78xx.c168
-rw-r--r--drivers/net/usb/r8152.c40
-rw-r--r--drivers/net/usb/r8153_ecm.c162
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/net/wan/hdlc_fr.c118
-rw-r--r--drivers/soc/fsl/qbman/qman.c12
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c6
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c6
-rw-r--r--include/linux/mhi.h7
-rw-r--r--include/linux/netdev_features.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set.h5
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/platform_data/hirschmann-hellcreek.h23
-rw-r--r--include/linux/usb/r8152.h37
-rw-r--r--include/net/dsa.h8
-rw-r--r--include/net/dst.h12
-rw-r--r--include/net/mptcp.h21
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h10
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h9
-rw-r--r--include/net/sctp/structs.h6
-rw-r--r--include/soc/fsl/qman.h3
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/uapi/linux/rtnetlink.h4
-rw-r--r--net/bridge/netfilter/Kconfig2
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c255
-rw-r--r--net/core/dev_ioctl.c2
-rw-r--r--net/dsa/Kconfig6
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/slave.c12
-rw-r--r--net/dsa/tag_hellcreek.c66
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c128
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c139
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/mptcp/ctrl.c14
-rw-r--r--net/mptcp/pm_netlink.c8
-rw-r--r--net/mptcp/protocol.c67
-rw-r--r--net/mptcp/protocol.h1
-rw-r--r--net/netfilter/Kconfig10
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipset/ip_set_core.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h45
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_mac.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c7
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c7
-rw-r--r--net/netfilter/nf_tables_api.c3
-rw-r--r--net/netfilter/nft_reject.c12
-rw-r--r--net/netfilter/nft_reject_inet.c68
-rw-r--r--net/netfilter/nft_reject_netdev.c189
-rw-r--r--net/sched/act_api.c69
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c3
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh211
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mld.sh558
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh107
-rw-r--r--tools/testing/selftests/net/mptcp/config10
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh94
146 files changed, 6414 insertions, 1340 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
new file mode 100644
index 000000000000..5592f58fa6f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/hirschmann,hellcreek.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hirschmann Hellcreek TSN Switch Device Tree Bindings
+
+allOf:
+ - $ref: dsa.yaml#
+
+maintainers:
+ - Andrew Lunn <andrew@lunn.ch>
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Vivien Didelot <vivien.didelot@gmail.com>
+ - Kurt Kanzenbach <kurt@linutronix.de>
+
+description:
+ The Hellcreek TSN Switch IP is an IEEE 802.1Q-compliant Ethernet switch. It
+ supports the Precision Time Protocol, hardware timestamping, as well as the
+ Time-Aware Shaper.
+
+properties:
+ compatible:
+ items:
+ - const: hirschmann,hellcreek-de1soc-r1
+
+ reg:
+ description:
+ The physical base address and size of TSN and PTP memory base
+ minItems: 2
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: tsn
+ - const: ptp
+
+ leds:
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^led@[01]$":
+ type: object
+ description: Hellcreek leds
+ $ref: ../../leds/common.yaml#
+
+ properties:
+ reg:
+ items:
+ - enum: [0, 1]
+ description: Led number
+
+ label: true
+
+ default-state: true
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ethernet-ports
+ - leds
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ switch0: switch@ff240000 {
+ compatible = "hirschmann,hellcreek-de1soc-r1";
+ reg = <0xff240000 0x1000>,
+ <0xff250000 0x1000>;
+ reg-names = "tsn", "ptp";
+ dsa,member = <0 0>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan0";
+ phy-handle = <&phy1>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan1";
+ phy-handle = <&phy2>;
+ };
+ };
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "sync_good";
+ default-state = "on";
+ };
+
+ led@1 {
+ reg = <1>;
+ label = "is_gm";
+ default-state = "off";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 2735be1a8470..bc8347c6cf56 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -441,6 +441,8 @@ patternProperties:
description: HiDeep Inc.
"^himax,.*":
description: Himax Technologies, Inc.
+ "^hirschmann,.*":
+ description: Hirschmann Automation and Control GmbH
"^hisilicon,.*":
description: Hisilicon Limited.
"^hit,.*":
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 63ef386afd0a..70c71c9206e2 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -70,6 +70,7 @@ Contents:
lapb-module
mac80211-injection
mpls-sysctl
+ mptcp-sysctl
multiqueue
netconsole
netdev-features
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
new file mode 100644
index 000000000000..6af0196c4297
--- /dev/null
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+MPTCP Sysctl variables
+======================
+
+/proc/sys/net/mptcp/* Variables
+===============================
+
+enabled - INTEGER
+ Control whether MPTCP sockets can be created.
+
+ MPTCP sockets can be created if the value is nonzero. This is
+ a per-namespace sysctl.
+
+ Default: 1
+
+add_addr_timeout - INTEGER (seconds)
+ Set the timeout after which an ADD_ADDR control message will be
+ resent to an MPTCP peer that has not acknowledged a previous
+ ADD_ADDR message.
+
+ The default value matches TCP_RTO_MAX. This is a per-namespace
+ sysctl.
+
+ Default: 120
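A minimal userspace sketch of how the two knobs documented above can be driven from C via their /proc/sys files; the helper name mptcp_sysctl_write is purely illustrative and not part of this patch:

    #include <stdio.h>

    /* Write a value to a net.mptcp sysctl in the current net namespace. */
    static int mptcp_sysctl_write(const char *name, const char *value)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/sys/net/mptcp/%s", name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", value);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* Allow MPTCP sockets; resend unacknowledged ADD_ADDR after 60s. */
            mptcp_sysctl_write("enabled", "1");
            mptcp_sysctl_write("add_addr_timeout", "60");
            return 0;
    }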
diff --git a/MAINTAINERS b/MAINTAINERS
index 17f5571788c9..badaaa815aa3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12265,6 +12265,7 @@ L: mptcp@lists.01.org
S: Maintained
W: https://github.com/multipath-tcp/mptcp_net-next/wiki
B: https://github.com/multipath-tcp/mptcp_net-next/issues
+F: Documentation/networking/mptcp-sysctl.rst
F: include/net/mptcp.h
F: include/uapi/linux/mptcp.h
F: net/mptcp/
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 2cff5ddff225..ba9e721d61b7 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1165,6 +1165,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
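The new mhi_queue_is_full() helper lets a client driver apply back-pressure before its MHI TX ring overruns. Below is a minimal sketch of such a transmit path; it assumes a private struct holding an mhi_device pointer (similar to what the new mhi_net driver keeps), and the function and field names are illustrative rather than copied from mhi_net.c:

    #include <linux/mhi.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct example_mhi_priv {
            struct mhi_device *mdev;        /* assumed private data layout */
    };

    static netdev_tx_t example_mhi_xmit(struct sk_buff *skb,
                                        struct net_device *ndev)
    {
            struct example_mhi_priv *priv = netdev_priv(ndev);
            int err;

            err = mhi_queue_skb(priv->mdev, DMA_TO_DEVICE, skb, skb->len,
                                MHI_EOT);
            if (unlikely(err)) {
                    dev_kfree_skb_any(skb);
                    ndev->stats.tx_dropped++;
                    return NETDEV_TX_OK;
            }

            /* Stop the queue while the MHI TX ring is full; a completion
             * callback would wake it again with netif_wake_queue().
             */
            if (mhi_queue_is_full(priv->mdev, DMA_TO_DEVICE))
                    netif_stop_queue(ndev);

            return NETDEV_TX_OK;
    }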
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index ec53528d8205..8163f5df8ebf 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -545,14 +545,10 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
}
}
-static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
+ bool sched_napi)
{
- /*
- * In case of threaded ISR, for RT kernels in_irq() does not return
- * appropriate value, so use in_serving_softirq to distinguish between
- * softirq and irq contexts.
- */
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ source and invoke NAPI */
qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
np->p = p;
@@ -564,7 +560,8 @@ static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct qman_fq *rsp_fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
@@ -573,7 +570,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
- if (caam_qi_napi_schedule(p, caam_napi))
+ if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c0af2dc8b938..4ee41924cdf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -426,6 +426,13 @@ config VSOCKMON
mostly intended for developers or support to debug vsock issues. If
unsure, say N.
+config MHI_NET
+ tristate "MHI network driver"
+ depends on MHI_BUS
+ help
+ This is the network driver for the MHI bus. It can be used with
+ QCOM-based WWAN modems (like the SDX55). Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b27e8633c305..36e2e41ed2aa 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
+obj-$(CONFIG_MHI_NET) += mhi_net.o
#
# Networking Drivers
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 84ecbc6fa0ff..71c9677d135f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1228,14 +1228,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
}
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_ALL_TSO)
+ NETIF_F_GSO_SOFTWARE)
static void bond_compute_features(struct bonding *bond)
@@ -1291,8 +1291,7 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -4721,7 +4720,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2451f61a38e4..f6a0488589fc 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -24,6 +24,8 @@ config NET_DSA_LOOP
This enables support for a fake mock-up switch chip which
exercises the DSA APIs.
+source "drivers/net/dsa/hirschmann/Kconfig"
+
config NET_DSA_LANTIQ_GSWIP
tristate "Lantiq / Intel GSWIP"
depends on HAS_IOMEM && NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 4a943ccc2ca4..a84adb140a04 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
+obj-y += hirschmann/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
new file mode 100644
index 000000000000..222dd35e2c9d
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+config NET_DSA_HIRSCHMANN_HELLCREEK
+ tristate "Hirschmann Hellcreek TSN Switch support"
+ depends on HAS_IOMEM
+ depends on NET_DSA
+ depends on PTP_1588_CLOCK
+ select NET_DSA_TAG_HELLCREEK
+ help
+ This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/hirschmann/Makefile b/drivers/net/dsa/hirschmann/Makefile
new file mode 100644
index 000000000000..f4075c2998b5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK) += hellcreek_sw.o
+hellcreek_sw-objs := hellcreek.o
+hellcreek_sw-objs += hellcreek_ptp.o
+hellcreek_sw-objs += hellcreek_hwtstamp.o
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
new file mode 100644
index 000000000000..dfa66f7260d6
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: (GPL-2.0 or MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <net/dsa.h>
+
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+static const struct hellcreek_counter hellcreek_counter[] = {
+ { 0x00, "RxFiltered", },
+ { 0x01, "RxOctets1k", },
+ { 0x02, "RxVTAG", },
+ { 0x03, "RxL2BAD", },
+ { 0x04, "RxOverloadDrop", },
+ { 0x05, "RxUC", },
+ { 0x06, "RxMC", },
+ { 0x07, "RxBC", },
+ { 0x08, "RxRS<64", },
+ { 0x09, "RxRS64", },
+ { 0x0a, "RxRS65_127", },
+ { 0x0b, "RxRS128_255", },
+ { 0x0c, "RxRS256_511", },
+ { 0x0d, "RxRS512_1023", },
+ { 0x0e, "RxRS1024_1518", },
+ { 0x0f, "RxRS>1518", },
+ { 0x10, "TxTailDropQueue0", },
+ { 0x11, "TxTailDropQueue1", },
+ { 0x12, "TxTailDropQueue2", },
+ { 0x13, "TxTailDropQueue3", },
+ { 0x14, "TxTailDropQueue4", },
+ { 0x15, "TxTailDropQueue5", },
+ { 0x16, "TxTailDropQueue6", },
+ { 0x17, "TxTailDropQueue7", },
+ { 0x18, "RxTrafficClass0", },
+ { 0x19, "RxTrafficClass1", },
+ { 0x1a, "RxTrafficClass2", },
+ { 0x1b, "RxTrafficClass3", },
+ { 0x1c, "RxTrafficClass4", },
+ { 0x1d, "RxTrafficClass5", },
+ { 0x1e, "RxTrafficClass6", },
+ { 0x1f, "RxTrafficClass7", },
+ { 0x21, "TxOctets1k", },
+ { 0x22, "TxVTAG", },
+ { 0x23, "TxL2BAD", },
+ { 0x25, "TxUC", },
+ { 0x26, "TxMC", },
+ { 0x27, "TxBC", },
+ { 0x28, "TxTS<64", },
+ { 0x29, "TxTS64", },
+ { 0x2a, "TxTS65_127", },
+ { 0x2b, "TxTS128_255", },
+ { 0x2c, "TxTS256_511", },
+ { 0x2d, "TxTS512_1023", },
+ { 0x2e, "TxTS1024_1518", },
+ { 0x2f, "TxTS>1518", },
+ { 0x30, "TxTrafficClassOverrun0", },
+ { 0x31, "TxTrafficClassOverrun1", },
+ { 0x32, "TxTrafficClassOverrun2", },
+ { 0x33, "TxTrafficClassOverrun3", },
+ { 0x34, "TxTrafficClassOverrun4", },
+ { 0x35, "TxTrafficClassOverrun5", },
+ { 0x36, "TxTrafficClassOverrun6", },
+ { 0x37, "TxTrafficClassOverrun7", },
+ { 0x38, "TxTrafficClass0", },
+ { 0x39, "TxTrafficClass1", },
+ { 0x3a, "TxTrafficClass2", },
+ { 0x3b, "TxTrafficClass3", },
+ { 0x3c, "TxTrafficClass4", },
+ { 0x3d, "TxTrafficClass5", },
+ { 0x3e, "TxTrafficClass6", },
+ { 0x3f, "TxTrafficClass7", },
+};
+
+static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->base + offset);
+}
+
+static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_CTRL_C);
+}
+
+static u16 hellcreek_read_stat(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_SWSTAT);
+}
+
+static void hellcreek_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->base + offset);
+}
+
+static void hellcreek_select_port(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
+{
+ u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
+{
+ u16 val = counter << HR_CSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_CSEL);
+
+ /* Data sheet states to wait at least 20 internal clock cycles */
+ ndelay(200);
+}
+
+static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
+ bool pvid)
+{
+ u16 val = 0;
+
+ /* Set pvid bit first */
+ if (pvid)
+ val |= HR_VIDCFG_PVID;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+
+ /* Set vlan */
+ val |= vid << HR_VIDCFG_VID_SHIFT;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+}
+
+static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ /* Wait up to 1ms, although 3 us should be enough */
+ return readx_poll_timeout(hellcreek_read_ctrl, hellcreek,
+ val, val & HR_CTRL_C_READY,
+ 3, 1000);
+}
+
+static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek,
+ val, !(val & HR_CTRL_C_TRANSITION),
+ 1, 1000);
+}
+
+static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek,
+ val, !(val & HR_SWSTAT_BUSY),
+ 1, 1000);
+}
+
+static int hellcreek_detect(struct hellcreek *hellcreek)
+{
+ u16 id, rel_low, rel_high, date_low, date_high, tgd_ver;
+ u8 tgd_maj, tgd_min;
+ u32 rel, date;
+
+ id = hellcreek_read(hellcreek, HR_MODID_C);
+ rel_low = hellcreek_read(hellcreek, HR_REL_L_C);
+ rel_high = hellcreek_read(hellcreek, HR_REL_H_C);
+ date_low = hellcreek_read(hellcreek, HR_BLD_L_C);
+ date_high = hellcreek_read(hellcreek, HR_BLD_H_C);
+ tgd_ver = hellcreek_read(hellcreek, TR_TGDVER);
+
+ if (id != hellcreek->pdata->module_id)
+ return -ENODEV;
+
+ rel = rel_low | (rel_high << 16);
+ date = date_low | (date_high << 16);
+ tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT;
+ tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT;
+
+ dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n",
+ id, rel, date, tgd_maj, tgd_min);
+
+ return 0;
+}
+
+static void hellcreek_feature_detect(struct hellcreek *hellcreek)
+{
+ u16 features;
+
+ features = hellcreek_read(hellcreek, HR_FEABITS0);
+
+ /* Currently we only detect the size of the FDB table */
+ hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
+ HR_FEABITS0_FDBBINS_SHIFT) * 32;
+
+ dev_info(hellcreek->dev, "Feature detect: FDB entries=%zu\n",
+ hellcreek->fdb_entries);
+}
+
+static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_HELLCREEK;
+}
+
+static int hellcreek_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Enable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val |= HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static void hellcreek_port_disable(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Disable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val &= ~HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ counter->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(hellcreek_counter);
+}
+
+static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ int i;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+ u8 offset = counter->offset + port * 64;
+ u16 high, low;
+ u64 value = 0;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_counter(hellcreek, offset);
+
+ /* The registers are locked internally by selecting the
+ * counter. So low and high can be read without reading high
+ * again.
+ */
+ high = hellcreek_read(hellcreek, HR_CRDH);
+ low = hellcreek_read(hellcreek, HR_CRDL);
+ value = (high << 16) | low;
+
+ hellcreek_port->counter_values[i] += value;
+ data[i] = hellcreek_port->counter_values[i];
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
+static u16 hellcreek_private_vid(int port)
+{
+ return VLAN_N_VID - port + 1;
+}
+
+static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);
+
+ /* Restriction: Make sure that nobody uses the "private" VLANs. These
+ * VLANs are internally used by the driver to ensure port
+ * separation. Thus, they cannot be used by someone else.
+ */
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ const u16 restricted_vid = hellcreek_private_vid(i);
+ u16 vid;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (vid == restricted_vid)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port,
+ int *shift, int *mask)
+{
+ switch (port) {
+ case 0:
+ *shift = HR_VIDMBRCFG_P0MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P0MBR_MASK;
+ break;
+ case 1:
+ *shift = HR_VIDMBRCFG_P1MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P1MBR_MASK;
+ break;
+ case 2:
+ *shift = HR_VIDMBRCFG_P2MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P2MBR_MASK;
+ break;
+ case 3:
+ *shift = HR_VIDMBRCFG_P3MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P3MBR_MASK;
+ break;
+ default:
+ *shift = *mask = 0;
+ dev_err(hellcreek->dev, "Unknown port %d selected!\n", port);
+ }
+}
+
+static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d",
+ port, vid, pvid, untagged);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_select_vlan(hellcreek, vid, pvid);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ if (untagged)
+ val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift;
+ else
+ val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
+ u16 vid)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_vlan(hellcreek, vid, 0);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ val |= HELLCREEK_VLAN_NO_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
+ vlan->vid_begin, vlan->vid_end, port,
+ untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+}
+
+static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
+ vlan->vid_begin, vlan->vid_end, port);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ return 0;
+}
+
+static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ const char *new_state;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_port = &hellcreek->ports[port];
+ val = hellcreek_port->ptcfg;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ new_state = "DISABLED";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_BLOCKING:
+ new_state = "BLOCKING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LISTENING:
+ new_state = "LISTENING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LEARNING:
+ new_state = "LEARNING";
+ val |= HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_FORWARDING:
+ new_state = "FORWARDING";
+ val &= ~HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ default:
+ new_state = "UNKNOWN";
+ }
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n",
+ port, new_state);
+}
+
+static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port,
+ bool enable)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ u16 ptcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ptcfg = hellcreek_port->ptcfg;
+
+ if (enable)
+ ptcfg |= HR_PTCFG_INGRESSFLT;
+ else
+ ptcfg &= ~HR_PTCFG_INGRESSFLT;
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+ hellcreek_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek,
+ bool enable)
+{
+ u16 swcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ swcfg = hellcreek->swcfg;
+
+ if (enable)
+ swcfg &= ~HR_SWCFG_VLAN_UNAWARE;
+ else
+ swcfg |= HR_SWCFG_VLAN_UNAWARE;
+
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. */
+static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
+ bool enabled)
+{
+ const u16 vid = hellcreek_private_vid(port);
+ int upstream = dsa_upstream_port(ds, port);
+ struct hellcreek *hellcreek = ds->priv;
+
+ /* Apply vid to port as egress untagged and port vlan id */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, port, vid, true, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ /* Apply vid to cpu port as well */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, upstream, vid, false, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, upstream, vid);
+}
+
+static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port);
+
+ /* When joining a vlan_filtering bridge, keep the switch VLAN aware */
+ if (!ds->vlan_filtering)
+ hellcreek_setup_vlan_awareness(hellcreek, false);
+
+ /* Drop private vlans */
+ hellcreek_setup_vlan_membership(ds, port, false);
+
+ return 0;
+}
+
+static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port);
+
+ /* Enable VLAN awareness */
+ hellcreek_setup_vlan_awareness(hellcreek, true);
+
+ /* Enable private vlans */
+ hellcreek_setup_vlan_membership(ds, port, true);
+}
+
+static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ u16 meta = 0;
+
+ dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
+ "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
+ entry->is_obt, entry->reprio_en, entry->reprio_tc);
+
+ /* Add mac address */
+ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
+ hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM);
+ hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL);
+
+ /* Meta data */
+ meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
+ if (entry->is_obt)
+ meta |= HR_FDBWRM0_OBT;
+ if (entry->reprio_en) {
+ meta |= HR_FDBWRM0_REPRIO_EN;
+ meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
+ }
+ hellcreek_write(hellcreek, meta, HR_FDBWRM0);
+
+ /* Commit */
+ hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac);
+
+ /* Delete by matching idx */
+ hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+/* Retrieve the index of a FDB entry by mac address. Currently we search through
+ * the complete table in hardware. If that's too slow, we might have to cache
+ * the complete FDB table in software.
+ */
+static int hellcreek_fdb_get(struct hellcreek *hellcreek,
+ const unsigned char *dest,
+ struct hellcreek_fdb_entry *entry)
+{
+ size_t i;
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ /* We have to read the complete table, because the switch/driver might
+ * enter new entries anywhere.
+ */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char addr[ETH_ALEN];
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ addr[5] = mac & 0xff;
+ addr[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ addr[3] = mac & 0xff;
+ addr[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ addr[1] = mac & 0xff;
+ addr[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ if (memcmp(addr, dest, ETH_ALEN))
+ continue;
+
+ /* Match found */
+ entry->idx = i;
+ entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
+ HR_FDBMDRD_AGE_SHIFT;
+ entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
+ entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
+ entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
+ entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
+ HR_FDBMDRD_REPRIO_TC_SHIFT;
+ entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
+ memcpy(entry->mac, addr, sizeof(addr));
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ memcpy(entry.mac, addr, sizeof(entry.mac));
+ entry.portmask = BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ } else {
+ /* Found */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask |= BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ dev_err(hellcreek->dev, "FDB entry for deletion not found!\n");
+ } else {
+ /* Found */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask &= ~BIT(port);
+
+ if (entry.portmask != 0x00) {
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 entries;
+ size_t i;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ entries = hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries);
+
+ /* Read table */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char null_addr[ETH_ALEN] = { 0 };
+ struct hellcreek_fdb_entry entry = { 0 };
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ entry.mac[5] = mac & 0xff;
+ entry.mac[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ entry.mac[3] = mac & 0xff;
+ entry.mac[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ entry.mac[1] = mac & 0xff;
+ entry.mac[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ /* Check valid */
+ if (!memcmp(entry.mac, null_addr, ETH_ALEN))
+ continue;
+
+ entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry.is_static = !!(meta & HR_FDBMDRD_STATIC);
+
+ /* Check port mask */
+ if (!(entry.portmask & BIT(port)))
+ continue;
+
+ cb(entry.mac, 0, entry.is_static, data);
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
+ vlan_filtering ? "Enable" : "Disable", port);
+
+ /* Configure port to drop packets with unknown VIDs */
+ hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering);
+
+ /* Enable VLAN awareness on the switch. This is safe due to
+ * ds->vlan_filtering_is_global.
+ */
+ hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering);
+
+ return 0;
+}
+
+static int hellcreek_enable_ip_core(struct hellcreek *hellcreek)
+{
+ int ret;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ val = hellcreek_read(hellcreek, HR_CTRL_C);
+ val |= HR_CTRL_C_ENABLE;
+ hellcreek_write(hellcreek, val, HR_CTRL_C);
+ ret = hellcreek_wait_until_transitioned(hellcreek);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek)
+{
+ struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT];
+ struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT];
+ u16 ptcfg = 0;
+
+ ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, CPU_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ hellcreek_select_port(hellcreek, TUNNEL_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ cpu_port->ptcfg = ptcfg;
+ tunnel_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
+{
+ int i;
+
+ /* The switch has multiple egress queues per port. The queue is selected
+ * via the PCP field in the VLAN header. The switch internally deals
+ * with traffic classes instead of PCP values and this mapping is
+ * configurable.
+ *
+ * The default mapping is (PCP - TC):
+ * 7 - 7
+ * 6 - 6
+ * 5 - 5
+ * 4 - 4
+ * 3 - 3
+ * 2 - 1
+ * 1 - 0
+ * 0 - 2
+ *
+ * Instead, an identity mapping (PCP == TC) is configured below.
+ */
+
+ for (i = 0; i < 8; ++i) {
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_prio(hellcreek, i);
+ hellcreek_write(hellcreek,
+ i << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
+ HR_PRTCCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
+static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
+{
+ static struct hellcreek_fdb_entry ptp = {
+ /* MAC: 01-1B-19-00-00-00 */
+ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry p2p = {
+ /* MAC: 01-80-C2-00-00-0E */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ int ret;
+
+ mutex_lock(&hellcreek->reg_lock);
+ ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &p2p);
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_setup(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 swcfg = 0;
+ int ret, i;
+
+ dev_dbg(hellcreek->dev, "Set up the switch\n");
+
+ /* Let's go */
+ ret = hellcreek_enable_ip_core(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to enable IP core!\n");
+ return ret;
+ }
+
+ /* Enable CPU/Tunnel ports */
+ hellcreek_setup_cpu_and_tunnel_port(hellcreek);
+
+ /* Switch config: Keep defaults, enable FDB aging and learning and tag
+ * each frame from/to cpu port for DSA tagging. Also enable the length
+ * aware shaping mode. This eliminates the need for Qbv guard bands.
+ */
+ swcfg |= HR_SWCFG_FDBAGE_EN |
+ HR_SWCFG_FDBLRN_EN |
+ HR_SWCFG_ALWAYS_OBT |
+ (HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT);
+ hellcreek->swcfg = swcfg;
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ /* Initial vlan membership to reflect port separation */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_setup_vlan_membership(ds, i, true);
+ }
+
+ /* Configure PCP <-> TC mapping */
+ hellcreek_setup_tc_identity_mapping(hellcreek);
+
+ /* Allow VLAN configurations while not filtering, which is the default
+ * for new DSA drivers.
+ */
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* The VLAN awareness is a global switch setting. Therefore, mixed vlan
+ * filtering setups are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Intercept _all_ PTP multicast traffic */
+ ret = hellcreek_setup_fdb(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to insert static PTP FDB entries\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+
+ /* The MAC settings are a hardware configuration option and cannot be
+ * changed at run time or by strapping. Therefore the attached PHYs
+ * should be programmed to only advertise settings which are supported
+ * by the hardware.
+ */
+ if (hellcreek->pdata->is_100_mbits)
+ phylink_set(mask, 100baseT_Full);
+ else
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int
+hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ bool used = true;
+ int ret = -EBUSY;
+ u16 vid;
+ int i;
+
+ dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);
+
+ /*
+ * Deny VLAN devices on top of lan ports with the same VLAN ids, because
+ * it breaks the port separation due to the private VLANs. Example:
+ *
+ * lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99
+ * and lan1.100 works.
+ */
+
+ if (!is_vlan_dev(info->upper_dev))
+ return 0;
+
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* For all ports, check bitmaps */
+ mutex_lock(&hellcreek->vlan_lock);
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ if (port == i)
+ continue;
+
+ used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap);
+ }
+
+ if (used)
+ goto out;
+
+ /* Update bitmap */
+ set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap);
+
+ ret = 0;
+
+out:
+ mutex_unlock(&hellcreek->vlan_lock);
+
+ return ret;
+}
+
+static const struct dsa_switch_ops hellcreek_ds_ops = {
+ .get_ethtool_stats = hellcreek_get_ethtool_stats,
+ .get_sset_count = hellcreek_get_sset_count,
+ .get_strings = hellcreek_get_strings,
+ .get_tag_protocol = hellcreek_get_tag_protocol,
+ .get_ts_info = hellcreek_get_ts_info,
+ .phylink_validate = hellcreek_phylink_validate,
+ .port_bridge_join = hellcreek_port_bridge_join,
+ .port_bridge_leave = hellcreek_port_bridge_leave,
+ .port_disable = hellcreek_port_disable,
+ .port_enable = hellcreek_port_enable,
+ .port_fdb_add = hellcreek_fdb_add,
+ .port_fdb_del = hellcreek_fdb_del,
+ .port_fdb_dump = hellcreek_fdb_dump,
+ .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
+ .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
+ .port_prechangeupper = hellcreek_port_prechangeupper,
+ .port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_stp_state_set = hellcreek_port_stp_state_set,
+ .port_txtstamp = hellcreek_port_txtstamp,
+ .port_vlan_add = hellcreek_vlan_add,
+ .port_vlan_del = hellcreek_vlan_del,
+ .port_vlan_filtering = hellcreek_vlan_filtering,
+ .port_vlan_prepare = hellcreek_vlan_prepare,
+ .setup = hellcreek_setup,
+};
+
+static int hellcreek_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hellcreek *hellcreek;
+ struct resource *res;
+ int ret, i;
+
+ hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL);
+ if (!hellcreek)
+ return -ENOMEM;
+
+ hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID,
+ sizeof(*hellcreek->vidmbrcfg),
+ GFP_KERNEL);
+ if (!hellcreek->vidmbrcfg)
+ return -ENOMEM;
+
+ hellcreek->pdata = of_device_get_match_data(dev);
+
+ hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports,
+ sizeof(*hellcreek->ports),
+ GFP_KERNEL);
+ if (!hellcreek->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ struct hellcreek_port *port = &hellcreek->ports[i];
+
+ port->counter_values =
+ devm_kcalloc(dev,
+ ARRAY_SIZE(hellcreek_counter),
+ sizeof(*port->counter_values),
+ GFP_KERNEL);
+ if (!port->counter_values)
+ return -ENOMEM;
+
+ port->vlan_dev_bitmap =
+ devm_kcalloc(dev,
+ BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!port->vlan_dev_bitmap)
+ return -ENOMEM;
+
+ port->hellcreek = hellcreek;
+ port->port = i;
+ }
+
+ mutex_init(&hellcreek->reg_lock);
+ mutex_init(&hellcreek->vlan_lock);
+ mutex_init(&hellcreek->ptp_lock);
+
+ hellcreek->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn");
+ if (!res) {
+ dev_err(dev, "No memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->base)) {
+ dev_err(dev, "No memory available!\n");
+ return PTR_ERR(hellcreek->base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
+ if (!res) {
+ dev_err(dev, "No PTP memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->ptp_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->ptp_base)) {
+ dev_err(dev, "No memory available!\n");
+ return PTR_ERR(hellcreek->ptp_base);
+ }
+
+ ret = hellcreek_detect(hellcreek);
+ if (ret) {
+ dev_err(dev, "No (known) chip found!\n");
+ return ret;
+ }
+
+ ret = hellcreek_wait_until_ready(hellcreek);
+ if (ret) {
+ dev_err(dev, "Switch didn't become ready!\n");
+ return ret;
+ }
+
+ hellcreek_feature_detect(hellcreek);
+
+ hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL);
+ if (!hellcreek->ds)
+ return -ENOMEM;
+
+ hellcreek->ds->dev = dev;
+ hellcreek->ds->priv = hellcreek;
+ hellcreek->ds->ops = &hellcreek_ds_ops;
+ hellcreek->ds->num_ports = hellcreek->pdata->num_ports;
+ hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES;
+
+ ret = dsa_register_switch(hellcreek->ds);
+ if (ret) {
+ dev_err(dev, "Unable to register switch\n");
+ return ret;
+ }
+
+ ret = hellcreek_ptp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup PTP!\n");
+ goto err_ptp_setup;
+ }
+
+ ret = hellcreek_hwtstamp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup hardware timestamping!\n");
+ goto err_tstamp_setup;
+ }
+
+ platform_set_drvdata(pdev, hellcreek);
+
+ return 0;
+
+err_tstamp_setup:
+ hellcreek_ptp_free(hellcreek);
+err_ptp_setup:
+ dsa_unregister_switch(hellcreek->ds);
+
+ return ret;
+}
+
+static int hellcreek_remove(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ hellcreek_hwtstamp_free(hellcreek);
+ hellcreek_ptp_free(hellcreek);
+ dsa_unregister_switch(hellcreek->ds);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct hellcreek_platform_data de1soc_r1_pdata = {
+ .num_ports = 4,
+ .is_100_mbits = 1,
+ .qbv_support = 1,
+ .qbv_on_cpu_port = 1,
+ .qbu_support = 0,
+ .module_id = 0x4c30,
+};
+
+static const struct of_device_id hellcreek_of_match[] = {
+ {
+ .compatible = "hirschmann,hellcreek-de1soc-r1",
+ .data = &de1soc_r1_pdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hellcreek_of_match);
+
+static struct platform_driver hellcreek_driver = {
+ .probe = hellcreek_probe,
+ .remove = hellcreek_remove,
+ .driver = {
+ .name = "hellcreek",
+ .of_match_table = hellcreek_of_match,
+ },
+};
+module_platform_driver(hellcreek_driver);
+
+MODULE_AUTHOR("Kurt Kanzenbach <kurt@linutronix.de>");
+MODULE_DESCRIPTION("Hirschmann Hellcreek driver");
+MODULE_LICENSE("Dual MIT/GPL");
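For context on the port-separation scheme used throughout the driver above: each user port is isolated through a "private" VID taken from the top of the VLAN range (hellcreek_private_vid()), which is why hellcreek_vlan_prepare() returns -EBUSY when a bridge tries to configure one of these VIDs. A small standalone sketch (plain userspace C, only for illustration) of what the mapping yields for the two front ports of the de1soc-r1:

    #include <stdio.h>

    #define VLAN_N_VID 4096                 /* as in <linux/if_vlan.h> */

    /* Same formula as hellcreek_private_vid() in the driver above. */
    static unsigned short private_vid(int port)
    {
            return VLAN_N_VID - port + 1;
    }

    int main(void)
    {
            int port;

            /* Front ports are 2 and 3; CPU is 0, tunnel is 1. */
            for (port = 2; port <= 3; port++)
                    printf("port %d (lan%d) -> private VID %u\n",
                           port, port - 2, private_vid(port));
            return 0;
    }

Running this prints VIDs 4095 and 4094, the reserved per-port values that user VLAN configurations must avoid.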
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
new file mode 100644
index 000000000000..e81781ebc31c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HELLCREEK_H_
+#define _HELLCREEK_H_
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/platform_data/hirschmann-hellcreek.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <net/dsa.h>
+
+/* Ports:
+ * - 0: CPU
+ * - 1: Tunnel
+ * - 2: TSN front port 1
+ * - 3: TSN front port 2
+ * - ...
+ */
+#define CPU_PORT 0
+#define TUNNEL_PORT 1
+
+#define HELLCREEK_VLAN_NO_MEMBER 0x0
+#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
+#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
+#define HELLCREEK_NUM_EGRESS_QUEUES 8
+
+/* Register definitions */
+#define HR_MODID_C (0 * 2)
+#define HR_REL_L_C (1 * 2)
+#define HR_REL_H_C (2 * 2)
+#define HR_BLD_L_C (3 * 2)
+#define HR_BLD_H_C (4 * 2)
+#define HR_CTRL_C (5 * 2)
+#define HR_CTRL_C_READY BIT(14)
+#define HR_CTRL_C_TRANSITION BIT(13)
+#define HR_CTRL_C_ENABLE BIT(0)
+
+#define HR_PSEL (0xa6 * 2)
+#define HR_PSEL_PTWSEL_SHIFT 4
+#define HR_PSEL_PTWSEL_MASK GENMASK(5, 4)
+#define HR_PSEL_PRTCWSEL_SHIFT 0
+#define HR_PSEL_PRTCWSEL_MASK GENMASK(2, 0)
+
+#define HR_PTCFG (0xa7 * 2)
+#define HR_PTCFG_MLIMIT_EN BIT(13)
+#define HR_PTCFG_UMC_FLT BIT(10)
+#define HR_PTCFG_UUC_FLT BIT(9)
+#define HR_PTCFG_UNTRUST BIT(8)
+#define HR_PTCFG_TAG_REQUIRED BIT(7)
+#define HR_PTCFG_PPRIO_SHIFT 4
+#define HR_PTCFG_PPRIO_MASK GENMASK(6, 4)
+#define HR_PTCFG_INGRESSFLT BIT(3)
+#define HR_PTCFG_BLOCKED BIT(2)
+#define HR_PTCFG_LEARNING_EN BIT(1)
+#define HR_PTCFG_ADMIN_EN BIT(0)
+
+#define HR_PRTCCFG (0xa8 * 2)
+#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
+#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+
+#define HR_CSEL (0x8d * 2)
+#define HR_CSEL_SHIFT 0
+#define HR_CSEL_MASK GENMASK(7, 0)
+#define HR_CRDL (0x8e * 2)
+#define HR_CRDH (0x8f * 2)
+
+#define HR_SWTRC_CFG (0x90 * 2)
+#define HR_SWTRC0 (0x91 * 2)
+#define HR_SWTRC1 (0x92 * 2)
+#define HR_PFREE (0x93 * 2)
+#define HR_MFREE (0x94 * 2)
+
+#define HR_FDBAGE (0x97 * 2)
+#define HR_FDBMAX (0x98 * 2)
+#define HR_FDBRDL (0x99 * 2)
+#define HR_FDBRDM (0x9a * 2)
+#define HR_FDBRDH (0x9b * 2)
+
+#define HR_FDBMDRD (0x9c * 2)
+#define HR_FDBMDRD_PORTMASK_SHIFT 0
+#define HR_FDBMDRD_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBMDRD_AGE_SHIFT 4
+#define HR_FDBMDRD_AGE_MASK GENMASK(7, 4)
+#define HR_FDBMDRD_OBT BIT(8)
+#define HR_FDBMDRD_PASS_BLOCKED BIT(9)
+#define HR_FDBMDRD_STATIC BIT(11)
+#define HR_FDBMDRD_REPRIO_TC_SHIFT 12
+#define HR_FDBMDRD_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBMDRD_REPRIO_EN BIT(15)
+
+#define HR_FDBWDL (0x9d * 2)
+#define HR_FDBWDM (0x9e * 2)
+#define HR_FDBWDH (0x9f * 2)
+#define HR_FDBWRM0 (0xa0 * 2)
+#define HR_FDBWRM0_PORTMASK_SHIFT 0
+#define HR_FDBWRM0_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBWRM0_OBT BIT(8)
+#define HR_FDBWRM0_PASS_BLOCKED BIT(9)
+#define HR_FDBWRM0_REPRIO_TC_SHIFT 12
+#define HR_FDBWRM0_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBWRM0_REPRIO_EN BIT(15)
+#define HR_FDBWRM1 (0xa1 * 2)
+
+#define HR_FDBWRCMD (0xa2 * 2)
+#define HR_FDBWRCMD_FDBDEL BIT(9)
+
+#define HR_SWCFG (0xa3 * 2)
+#define HR_SWCFG_GM_STATEMD BIT(15)
+#define HR_SWCFG_LAS_MODE_SHIFT 12
+#define HR_SWCFG_LAS_MODE_MASK GENMASK(13, 12)
+#define HR_SWCFG_LAS_OFF (0x00)
+#define HR_SWCFG_LAS_ON (0x01)
+#define HR_SWCFG_LAS_STATIC (0x10)
+#define HR_SWCFG_CT_EN BIT(11)
+#define HR_SWCFG_VLAN_UNAWARE BIT(10)
+#define HR_SWCFG_ALWAYS_OBT BIT(9)
+#define HR_SWCFG_FDBAGE_EN BIT(5)
+#define HR_SWCFG_FDBLRN_EN BIT(4)
+
+#define HR_SWSTAT (0xa4 * 2)
+#define HR_SWSTAT_FAIL BIT(4)
+#define HR_SWSTAT_BUSY BIT(0)
+
+#define HR_SWCMD (0xa5 * 2)
+#define HW_SWCMD_FLUSH BIT(0)
+
+#define HR_VIDCFG (0xaa * 2)
+#define HR_VIDCFG_VID_SHIFT 0
+#define HR_VIDCFG_VID_MASK GENMASK(11, 0)
+#define HR_VIDCFG_PVID BIT(12)
+
+#define HR_VIDMBRCFG (0xab * 2)
+#define HR_VIDMBRCFG_P0MBR_SHIFT 0
+#define HR_VIDMBRCFG_P0MBR_MASK GENMASK(1, 0)
+#define HR_VIDMBRCFG_P1MBR_SHIFT 2
+#define HR_VIDMBRCFG_P1MBR_MASK GENMASK(3, 2)
+#define HR_VIDMBRCFG_P2MBR_SHIFT 4
+#define HR_VIDMBRCFG_P2MBR_MASK GENMASK(5, 4)
+#define HR_VIDMBRCFG_P3MBR_SHIFT 6
+#define HR_VIDMBRCFG_P3MBR_MASK GENMASK(7, 6)
+
+#define HR_FEABITS0 (0xac * 2)
+#define HR_FEABITS0_FDBBINS_SHIFT 4
+#define HR_FEABITS0_FDBBINS_MASK GENMASK(7, 4)
+#define HR_FEABITS0_PCNT_SHIFT 8
+#define HR_FEABITS0_PCNT_MASK GENMASK(11, 8)
+#define HR_FEABITS0_MCNT_SHIFT 12
+#define HR_FEABITS0_MCNT_MASK GENMASK(15, 12)
+
+#define TR_QTRACK (0xb1 * 2)
+#define TR_TGDVER (0xb3 * 2)
+#define TR_TGDVER_REV_MIN_MASK GENMASK(7, 0)
+#define TR_TGDVER_REV_MIN_SHIFT 0
+#define TR_TGDVER_REV_MAJ_MASK GENMASK(15, 8)
+#define TR_TGDVER_REV_MAJ_SHIFT 8
+#define TR_TGDSEL (0xb4 * 2)
+#define TR_TGDSEL_TDGSEL_MASK GENMASK(1, 0)
+#define TR_TGDSEL_TDGSEL_SHIFT 0
+#define TR_TGDCTRL (0xb5 * 2)
+#define TR_TGDCTRL_GATE_EN BIT(0)
+#define TR_TGDCTRL_CYC_SNAP BIT(4)
+#define TR_TGDCTRL_SNAP_EST BIT(5)
+#define TR_TGDCTRL_ADMINGATESTATES_MASK GENMASK(15, 8)
+#define TR_TGDCTRL_ADMINGATESTATES_SHIFT 8
+#define TR_TGDSTAT0 (0xb6 * 2)
+#define TR_TGDSTAT1 (0xb7 * 2)
+#define TR_ESTWRL (0xb8 * 2)
+#define TR_ESTWRH (0xb9 * 2)
+#define TR_ESTCMD (0xba * 2)
+#define TR_ESTCMD_ESTSEC_MASK GENMASK(2, 0)
+#define TR_ESTCMD_ESTSEC_SHIFT 0
+#define TR_ESTCMD_ESTARM BIT(4)
+#define TR_ESTCMD_ESTSWCFG BIT(5)
+#define TR_EETWRL (0xbb * 2)
+#define TR_EETWRH (0xbc * 2)
+#define TR_EETCMD (0xbd * 2)
+#define TR_EETCMD_EETSEC_MASK GENMASK(2, 0)
+#define TR_EETCMD_EETSEC_SHIFT 0
+#define TR_EETCMD_EETARM BIT(4)
+#define TR_CTWRL (0xbe * 2)
+#define TR_CTWRH (0xbf * 2)
+#define TR_LCNSL (0xc1 * 2)
+#define TR_LCNSH (0xc2 * 2)
+#define TR_LCS (0xc3 * 2)
+#define TR_GCLDAT (0xc4 * 2)
+#define TR_GCLDAT_GCLWRGATES_MASK GENMASK(7, 0)
+#define TR_GCLDAT_GCLWRGATES_SHIFT 0
+#define TR_GCLDAT_GCLWRLAST BIT(8)
+#define TR_GCLDAT_GCLOVRI BIT(9)
+#define TR_GCLTIL (0xc5 * 2)
+#define TR_GCLTIH (0xc6 * 2)
+#define TR_GCLCMD (0xc7 * 2)
+#define TR_GCLCMD_GCLWRADR_MASK GENMASK(7, 0)
+#define TR_GCLCMD_GCLWRADR_SHIFT 0
+#define TR_GCLCMD_INIT_GATE_STATES_MASK GENMASK(15, 8)
+#define TR_GCLCMD_INIT_GATE_STATES_SHIFT 8
+
+struct hellcreek_counter {
+ u8 offset;
+ const char *name;
+};
+
+struct hellcreek;
+
+/* State flags for hellcreek_port_hwtstamp::state */
+enum {
+ HELLCREEK_HWTSTAMP_ENABLED,
+ HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+/* A structure to hold hardware timestamping information per port */
+struct hellcreek_port_hwtstamp {
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue; /* For synchronization messages */
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+struct hellcreek_port {
+ struct hellcreek *hellcreek;
+ unsigned long *vlan_dev_bitmap;
+ int port;
+ u16 ptcfg; /* ptcfg shadow */
+ u64 *counter_values;
+
+ /* Per-port timestamping resources */
+ struct hellcreek_port_hwtstamp port_hwtstamp;
+};
+
+struct hellcreek_fdb_entry {
+ size_t idx;
+ unsigned char mac[ETH_ALEN];
+ u8 portmask;
+ u8 age;
+ u8 is_obt;
+ u8 pass_blocked;
+ u8 is_static;
+ u8 reprio_tc;
+ u8 reprio_en;
+};
+
+struct hellcreek {
+ const struct hellcreek_platform_data *pdata;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct hellcreek_port *ports;
+ struct delayed_work overflow_work;
+ struct led_classdev led_is_gm;
+ struct led_classdev led_sync_good;
+ struct mutex reg_lock; /* Switch IP register lock */
+ struct mutex vlan_lock; /* VLAN bitmaps lock */
+ struct mutex ptp_lock; /* PTP IP register lock */
+ void __iomem *base;
+ void __iomem *ptp_base;
+ u16 swcfg; /* swcfg shadow */
+ u8 *vidmbrcfg; /* vidmbrcfg shadow */
+ u64 seconds; /* PTP seconds */
+ u64 last_ts; /* Used for overflow detection */
+ u16 status_out; /* ptp.status_out shadow */
+ size_t fdb_entries;
+};
+
+#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
new file mode 100644
index 000000000000..69dd9a2e8bb6
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_classify.h>
+
+#include "hellcreek.h"
+#include "hellcreek_hwtstamp.h"
+#include "hellcreek_ptp.h"
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ info->phc_index = hellcreek->ptp_clock ?
+ ptp_clock_index(hellcreek->ptp_clock) : -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ /* enabled tx timestamping */
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+
+ /* L2 & L4 PTPv2 event rx messages are timestamped */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+/* Enabling/disabling TX and RX HW timestamping for different PTP messages is
+ * not available in the switch. Thus, this function only serves as a check of
+ * whether the user requested what is actually available or not.
+ */
+static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
+ struct hwtstamp_config *config)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ bool tx_tstamp_enable = false;
+ bool rx_tstamp_enable = false;
+
+ /* Interaction with the timestamp hardware is prevented here. It is
+ * enabled when this config function ends successfully
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ /* Reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ tx_tstamp_enable = true;
+ break;
+
+ /* TX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_TX_OFF:
+ config->tx_type = HWTSTAMP_TX_ON;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ /* RX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_FILTER_NONE:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_tstamp_enable = true;
+ break;
+
+ /* RX HW timestamping can't be enabled for all messages on the switch */
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ if (!tx_tstamp_enable)
+ return -ERANGE;
+
+ if (!rx_tstamp_enable)
+ return -ERANGE;
+
+ /* If this point is reached, then the requested hwtstamp config is
+ * compatible with the hwtstamp offered by the switch. Therefore,
+ * enable the interaction with the HW timestamping
+ */
+ set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config config;
+ int err;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config *config;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+ config = &ps->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
+ * if the caller should not.
+ */
+static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek,
+ int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
+static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr)
+{
+ return be32_to_cpu(hdr->reserved2);
+}
+
+static void hellcreek_clear_reserved_field(struct ptp_header *hdr)
+{
+ hdr->reserved2 = 0;
+}
+
+static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 status;
+
+ status = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ if (status & PR_TS_STATUS_TS_LOST)
+ dev_err(hellcreek->dev,
+ "Tx time stamp lost! This should never happen!\n");
+
+ /* If hwtstamp is not available, this means the previous hwtstamp was
+ * successfully read, and the one we need is not yet available
+ */
+ return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0;
+}
+
+/* Get nanoseconds timestamp from timestamping unit */
+static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 nsl, nsh;
+
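+ /* As with the PTP clock snapshot (see hellcreek_ptp_clock_read()), all
+ * five 16 bit words are read, presumably because of internal locking;
+ * only the last two reads carry the nanoseconds (high word, then low
+ * word).
+ */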
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsl = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static int hellcreek_txtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps, int port)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned int status_reg, data_reg;
+ struct sk_buff *tmp_skb;
+ int ts_status;
+ u64 ns = 0;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ switch (port) {
+ case 2:
+ status_reg = PR_TS_TX_P1_STATUS_C;
+ data_reg = PR_TS_TX_P1_DATA_C;
+ break;
+ case 3:
+ status_reg = PR_TS_TX_P2_STATUS_C;
+ data_reg = PR_TS_TX_P2_DATA_C;
+ break;
+ default:
+ dev_err(hellcreek->dev, "Wrong port for timestamping!\n");
+ return 0;
+ }
+
+ ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg);
+
+ /* Not available yet? */
+ if (ts_status == 0) {
+ /* Check whether the operation of reading the tx timestamp has
+ * exceeded its allowed period
+ */
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_err(hellcreek->dev,
+ "Timeout while waiting for Tx timestamp!\n");
+ goto free_and_clear_skb;
+ }
+
+ /* The timestamp should become available shortly, so keep
+ * polling for it with high priority. Restart the work.
+ */
+ return 1;
+ }
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Now we have the timestamp in nanoseconds, store it in the correct
+ * structure in order to send it to the user
+ */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+
+ /* skb_complete_tx_timestamp() frees up the client to make another
+ * timestampable transmit. We have to be ready for it by clearing the
+ * ps->tx_skb "flag" beforehand
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ /* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+static void hellcreek_get_rxts(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ struct sk_buff *skb, struct sk_buff_head *rxq,
+ int port)
+{
+ struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ unsigned long flags;
+
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+
+ /* Lock & disable interrupts */
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ /* Add the reception queue "rxq" to the "received" queue and
+ * reinitialize "rxq". From now on, we deal with "received", not with
+ * "rxq".
+ */
+ skb_queue_splice_tail_init(rxq, &received);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ for (; skb; skb = __skb_dequeue(&received)) {
+ struct ptp_header *hdr;
+ unsigned int type;
+ u64 ns;
+
+ /* Get nanoseconds from ptp packet */
+ type = SKB_PTP_TYPE(skb);
+ hdr = ptp_parse_header(skb, type);
+ ns = hellcreek_get_reserved_field(hdr);
+ hellcreek_clear_reserved_field(hdr);
+
+ /* Add seconds part */
+ mutex_lock(&hellcreek->ptp_lock);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Save time stamp */
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ netif_rx_ni(skb);
+ }
+}
+
+static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ int port)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+ if (skb)
+ hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port);
+}
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ struct dsa_switch *ds = hellcreek->ds;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct hellcreek_port_hwtstamp *ps;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &hellcreek->ports[i].port_hwtstamp;
+
+ if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= hellcreek_txtstamp_work(hellcreek, ps, i);
+
+ hellcreek_rxtstamp_work(hellcreek, ps, i);
+ }
+
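+ /* For do_aux_work, a positive return value re-schedules this worker
+ * after that many jiffies, while a negative value stops the polling
+ * until the worker is scheduled again.
+ */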
+ return restart ? 1 : -1;
+}
+
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* Check if the driver is expected to do HW timestamping */
+ if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, clone, type);
+ if (!hdr)
+ return false;
+
+ if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state))
+ return false;
+
+ ps->tx_skb = clone;
+
+ /* Store the number of ticks that have occurred since system start-up
+ * until this moment
+ */
+ ps->tx_tstamp_start = jiffies;
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* This check only fails if the user did not initialize hardware
+ * timestamping beforehand.
+ */
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+
+ skb_queue_head_init(&ps->rx_queue);
+}
+
+int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
+{
+ struct dsa_switch *ds = hellcreek->ds;
+ int i;
+
+ /* Initialize timestamping ports. */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_hwtstamp_port_setup(hellcreek, i);
+ }
+
+ /* Select the synchronized clock as the source timekeeper for the
+ * timestamps and enable inline timestamping.
+ */
+ hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK |
+ PR_SETTINGS_C_RES3TS,
+ PR_SETTINGS_C);
+
+ return 0;
+}
+
+void hellcreek_hwtstamp_free(struct hellcreek *hellcreek)
+{
+ /* Nothing to do */
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
new file mode 100644
index 000000000000..c0745ffa1ebb
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_HWTSTAMP_H_
+#define _HELLCREEK_HWTSTAMP_H_
+
+#include <net/dsa.h>
+#include "hellcreek.h"
+
+/* Timestamp Register */
+#define PR_TS_RX_P1_STATUS_C (0x1d * 2)
+#define PR_TS_RX_P1_DATA_C (0x1e * 2)
+#define PR_TS_TX_P1_STATUS_C (0x1f * 2)
+#define PR_TS_TX_P1_DATA_C (0x20 * 2)
+#define PR_TS_RX_P2_STATUS_C (0x25 * 2)
+#define PR_TS_RX_P2_DATA_C (0x26 * 2)
+#define PR_TS_TX_P2_STATUS_C (0x27 * 2)
+#define PR_TS_TX_P2_DATA_C (0x28 * 2)
+
+#define PR_TS_STATUS_TS_AVAIL BIT(2)
+#define PR_TS_STATUS_TS_LOST BIT(3)
+
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1 ms. Software may encounter delays, so the timeout is set
+ * accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
+
+int hellcreek_hwtstamp_setup(struct hellcreek *chip);
+void hellcreek_hwtstamp_free(struct hellcreek *chip);
+
+#endif /* _HELLCREEK_HWTSTAMP_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
new file mode 100644
index 000000000000..2572c6087bb5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->ptp_base + offset);
+}
+
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->ptp_base + offset);
+}
+
+/* Get nanoseconds from PTP clock */
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+{
+ u16 nsl, nsh;
+
+ /* Take a snapshot */
+ hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C);
+
+ /* The time of day is saved as 96 bits. However, due to hardware
+ * limitations the seconds are not or only partly kept in the PTP
+ * core: currently only three bits for the seconds are available. That's
+ * why only the nanoseconds are used and the seconds are tracked in
+ * software. Due to internal locking, all five registers have to be
+ * read anyway.
+ */
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+{
+ u64 ns;
+
+ ns = hellcreek_ptp_clock_read(hellcreek);
+ if (ns < hellcreek->last_ts)
+ hellcreek->seconds++;
+ hellcreek->last_ts = ns;
+ ns += hellcreek->seconds * NSEC_PER_SEC;
+
+ return ns;
+}
+
+/* Retrieve the seconds part, in nanoseconds, for a packet timestamped with
+ * @ns. It has to be checked whether an overflow occurred between the packet
+ * arrival and now. If so, use the correct seconds (-1) for calculating the
+ * packet arrival time.
+ */
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
+{
+ u64 s;
+
+ __hellcreek_ptp_gettime(hellcreek);
+ if (hellcreek->last_ts > ns)
+ s = hellcreek->seconds * NSEC_PER_SEC;
+ else
+ s = (hellcreek->seconds - 1) * NSEC_PER_SEC;
+
+ return s;
+}
+
+static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u64 ns;
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hellcreek_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 secl, nsh, nsl;
+
+ secl = ts->tv_sec & 0xffff;
+ nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16;
+ nsl = ts->tv_nsec & 0xffff;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Update overflow data structure */
+ hellcreek->seconds = ts->tv_sec;
+ hellcreek->last_ts = ts->tv_nsec;
+
+ /* Set time in clock */
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, addendh, addendl;
+ u32 addend;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ negative = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns
+ * from the 8 ns (period of the oscillator) every time the accumulator
+ * register overflows. The value stored in the addend register is added
+ * to the accumulator register every 8 ns.
+ *
+ * addend value = (2^30 * accumulator_overflow_rate) /
+ * oscillator_frequency
+ * where:
+ *
+ * oscillator_frequency = 125 MHz
+ * accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8
+ */
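+ /* Simplifying the formula above:
+ *   addend = 2^30 * scaled_ppm * 2^-16 * 10^-6 * 8
+ *          = scaled_ppm * 2^17 / 10^6
+ *          = (scaled_ppm << 11) / 15625
+ * which is what is computed below.
+ */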
+ adj = scaled_ppm;
+ adj <<= 11;
+ addend = (u32)div_u64(adj, 15625);
+
+ addendh = (addend & 0xffff0000) >> 16;
+ addendl = addend & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set drift register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, counth, countl;
+ u32 count_val;
+
+ /* If the offset is larger than what the IP-Core slow offset resources
+ * can handle, don't consider slow adjustment. Rather, add the offset
+ * directly to the current time.
+ */
+ if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ hellcreek_ptp_gettime(ptp, &now);
+ now = timespec64_add(now, then);
+ hellcreek_ptp_settime(ptp, &now);
+
+ return 0;
+ }
+
+ if (delta < 0) {
+ negative = 1;
+ delta = -delta;
+ }
+
+ /* 'count_val' does not exceed the maximum register size (2^30) */
+ count_val = div_s64(delta, MAX_NS_PER_STEP);
+
+ counth = (count_val & 0xffff0000) >> 16;
+ countl = count_val & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set offset write register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS,
+ PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void hellcreek_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek *hellcreek;
+
+ hellcreek = dw_overflow_to_hellcreek(dw);
+
+ mutex_lock(&hellcreek->ptp_lock);
+ __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+}
+
+static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek,
+ int led)
+{
+ return (hellcreek->status_out & led) ? 1 : 0;
+}
+
+static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led,
+ enum led_brightness b)
+{
+ mutex_lock(&hellcreek->ptp_lock);
+
+ if (b)
+ hellcreek->status_out |= led;
+ else
+ hellcreek->status_out &= ~led;
+
+ hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+}
+
+static void hellcreek_led_sync_good_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b);
+}
+
+static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+}
+
+static void hellcreek_led_is_gm_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b);
+}
+
+static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+}
+
+/* There are two available LEDs, internally called sync_good and is_gm. However,
+ * the user might want to use a different label and specify the default state.
+ * Take those properties from the device tree.
+ */
+static int hellcreek_led_setup(struct hellcreek *hellcreek)
+{
+ struct device_node *leds, *led = NULL;
+ const char *label, *state;
+ int ret = -EINVAL;
+
+ leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
+ if (!leds) {
+ dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
+ return ret;
+ }
+
+ hellcreek->status_out = 0;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "First LED not specified!\n");
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_sync_good.name = ret ? "sync_good" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_sync_good.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_sync_good.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_SYNC_GOOD);
+ }
+
+ hellcreek->led_sync_good.max_brightness = 1;
+ hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set;
+ hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "Second LED not specified!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_is_gm.name = ret ? "is_gm" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_is_gm.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_is_gm.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_IS_GM);
+ }
+
+ hellcreek->led_is_gm.max_brightness = 1;
+ hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set;
+ hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get;
+
+ /* Set initial state */
+ if (hellcreek->led_sync_good.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1);
+ if (hellcreek->led_is_gm.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
+
+ /* Register both leds */
+ led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+
+ ret = 0;
+
+out:
+ of_node_put(leds);
+
+ return ret;
+}
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek)
+{
+ u16 status;
+ int ret;
+
+ /* Set up the overflow work */
+ INIT_DELAYED_WORK(&hellcreek->overflow_work,
+ hellcreek_ptp_overflow_check);
+
+ /* Setup PTP clock */
+ hellcreek->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(hellcreek->ptp_clock_info.name,
+ sizeof(hellcreek->ptp_clock_info.name),
+ "%s", dev_name(hellcreek->dev));
+
+ /* IP-Core can add up to 0.5 ns per 8 ns cycle, which means
+ * accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts
+ * the nominal frequency by 6.25%)
+ */
+ hellcreek->ptp_clock_info.max_adj = 62500000;
+ hellcreek->ptp_clock_info.n_alarm = 0;
+ hellcreek->ptp_clock_info.n_pins = 0;
+ hellcreek->ptp_clock_info.n_ext_ts = 0;
+ hellcreek->ptp_clock_info.n_per_out = 0;
+ hellcreek->ptp_clock_info.pps = 0;
+ hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
+ hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
+ hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
+ hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
+ hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
+
+ hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info,
+ hellcreek->dev);
+ if (IS_ERR(hellcreek->ptp_clock))
+ return PTR_ERR(hellcreek->ptp_clock);
+
+ /* Enable the offset correction process, if no offset correction is
+ * already taking place
+ */
+ status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C);
+ if (!(status & PR_CLOCK_STATUS_C_OFS_ACT))
+ hellcreek_ptp_write(hellcreek,
+ status | PR_CLOCK_STATUS_C_ENA_OFS,
+ PR_CLOCK_STATUS_C);
+
+ /* Enable the drift correction process */
+ hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT,
+ PR_CLOCK_STATUS_C);
+
+ /* LED setup */
+ ret = hellcreek_led_setup(hellcreek);
+ if (ret) {
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ return ret;
+ }
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void hellcreek_ptp_free(struct hellcreek *hellcreek)
+{
+ led_classdev_unregister(&hellcreek->led_is_gm);
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ cancel_delayed_work_sync(&hellcreek->overflow_work);
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ hellcreek->ptp_clock = NULL;
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.h b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
new file mode 100644
index 000000000000..0b51392c7e56
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_PTP_H_
+#define _HELLCREEK_PTP_H_
+
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "hellcreek.h"
+
+/* Every jump in time is 7 ns */
+#define MAX_NS_PER_STEP 7L
+
+/* Correct offset at every clock cycle */
+#define MIN_CLK_CYCLES_BETWEEN_STEPS 0
+
+/* Maximum available slow offset resources */
+#define MAX_SLOW_OFFSET_ADJ \
+ ((unsigned long long)((1 << 30) - 1) * MAX_NS_PER_STEP)
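+/* With (2^30 - 1) steps of 7 ns each, offsets of up to roughly 7.5 seconds
+ * can be corrected via the slow adjustment path.
+ */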
+
+/* Check for overflow four times a second */
+#define HELLCREEK_OVERFLOW_PERIOD (HZ / 4)
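+/* The hardware nanoseconds counter wraps every second and the seconds are
+ * tracked in software (see hellcreek_ptp_clock_read()), so the clock has to
+ * be sampled at least once per wrap; four times a second leaves ample margin.
+ */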
+
+/* PTP Register */
+#define PR_SETTINGS_C (0x09 * 2)
+#define PR_SETTINGS_C_RES3TS BIT(4)
+#define PR_SETTINGS_C_TS_SRC_TK_SHIFT 8
+#define PR_SETTINGS_C_TS_SRC_TK_MASK GENMASK(9, 8)
+#define PR_COMMAND_C (0x0a * 2)
+#define PR_COMMAND_C_SS BIT(0)
+
+#define PR_CLOCK_STATUS_C (0x0c * 2)
+#define PR_CLOCK_STATUS_C_ENA_DRIFT BIT(12)
+#define PR_CLOCK_STATUS_C_OFS_ACT BIT(13)
+#define PR_CLOCK_STATUS_C_ENA_OFS BIT(14)
+
+#define PR_CLOCK_READ_C (0x0d * 2)
+#define PR_CLOCK_WRITE_C (0x0e * 2)
+#define PR_CLOCK_OFFSET_C (0x0f * 2)
+#define PR_CLOCK_DRIFT_C (0x10 * 2)
+
+#define PR_SS_FREE_DATA_C (0x12 * 2)
+#define PR_SS_SYNT_DATA_C (0x14 * 2)
+#define PR_SS_SYNC_DATA_C (0x16 * 2)
+#define PR_SS_DRAC_DATA_C (0x18 * 2)
+
+#define STATUS_OUT (0x60 * 2)
+#define STATUS_OUT_SYNC_GOOD BIT(0)
+#define STATUS_OUT_IS_GM BIT(1)
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek);
+void hellcreek_ptp_free(struct hellcreek *hellcreek);
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset);
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset);
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns);
+
+#define ptp_to_hellcreek(ptp) \
+ container_of(ptp, struct hellcreek, ptp_clock_info)
+
+#define dw_overflow_to_hellcreek(dw) \
+ container_of(dw, struct hellcreek, overflow_work)
+
+#define led_to_hellcreek(ldev, led) \
+ container_of(ldev, struct hellcreek, led)
+
+#endif /* _HELLCREEK_PTP_H_ */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 771f58f50d61..6408402a44f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1021,6 +1021,53 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
mutex_unlock(&priv->reg_mutex);
}
+static int
+mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mii_bus *bus = priv->bus;
+ int length;
+ u32 val;
+
+ /* When a new MTU is set, DSA always sets the CPU port's MTU to the
+ * largest MTU of the slave ports. Because the switch only has a global
+ * RX length register, only allowing the CPU port here is enough.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, MT7530_GMACCR);
+ val &= ~MAX_RX_PKT_LEN_MASK;
+
+ /* RX length also includes Ethernet header, MTK tag, and FCS length */
+ length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
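+ /* For example, the standard 1500 byte MTU results in
+ * 1500 + 14 + 4 + 4 = 1522 bytes and thus MAX_RX_PKT_LEN_1522.
+ */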
+ if (length <= 1522) {
+ val |= MAX_RX_PKT_LEN_1522;
+ } else if (length <= 1536) {
+ val |= MAX_RX_PKT_LEN_1536;
+ } else if (length <= 1552) {
+ val |= MAX_RX_PKT_LEN_1552;
+ } else {
+ val &= ~MAX_RX_JUMBO_MASK;
+ val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
+ val |= MAX_RX_PKT_LEN_JUMBO;
+ }
+
+ mt7530_mii_write(priv, MT7530_GMACCR, val);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
+
+static int
+mt7530_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return MT7530_MAX_MTU;
+}
+
static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
@@ -2519,6 +2566,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
+ .port_change_mtu = mt7530_port_change_mtu,
+ .port_max_mtu = mt7530_port_max_mtu,
.port_stp_state_set = mt7530_stp_state_set,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 9278a8e3d04e..ee3523a7537e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,6 +11,9 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
+#define MTK_HDR_LEN 4
+#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN)
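+/* The jumbo length field (MAX_RX_JUMBO) is four bits wide and appears to
+ * count KiB units, which is where the 15 KiB ceiling above comes from.
+ */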
+
enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
@@ -289,6 +292,15 @@ enum mt7530_vlan_port_attr {
#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
#define MT7531_DIS_CLR BIT(31)
+#define MT7530_GMACCR 0x30e0
+#define MAX_RX_JUMBO(x) ((x) << 2)
+#define MAX_RX_JUMBO_MASK GENMASK(5, 2)
+#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0)
+#define MAX_RX_PKT_LEN_1522 0x0
+#define MAX_RX_PKT_LEN_1536 0x1
+#define MAX_RX_PKT_LEN_1552 0x2
+#define MAX_RX_PKT_LEN_JUMBO 0x3
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab3a9bb5e6f..f82ad7419508 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -124,7 +124,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
- dev->features |= NETIF_F_ALL_TSO;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 2d3dfdd2a716..e7b78b68eaac 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -235,6 +235,7 @@ struct chtls_dev {
struct list_head na_node;
unsigned int send_page_order;
int max_host_sndbuf;
+ u32 round_robin_cnt;
struct key_map kmap;
unsigned int cdev_state;
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index d581c4e623f8..24154816d1d1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1217,8 +1217,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
- csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
rxq_idx = port_id * step;
+ rxq_idx += cdev->round_robin_cnt++ % step;
+ csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];
csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
port_id * step;
csk->sndbuf = newsk->sk_sndbuf;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 06cc863f4dd6..1d1fa4e35ffb 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2300,9 +2300,9 @@ static void dpaa_tx_conf(struct net_device *net_dev,
}
static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
- struct qman_portal *portal)
+ struct qman_portal *portal, bool sched_napi)
{
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ and invoke NAPI */
qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
@@ -2316,7 +2316,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
struct dpaa_percpu_priv *percpu_priv;
@@ -2332,7 +2333,7 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_eth_refill_bpools(priv);
@@ -2343,7 +2344,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct rtnl_link_stats64 *percpu_stats;
@@ -2375,7 +2377,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
- if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
/* Make sure we didn't run out of buffers */
@@ -2460,7 +2462,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2471,7 +2474,7 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2481,7 +2484,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2495,7 +2499,7 @@ static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2541,7 +2545,7 @@ static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 0;
+ percpu_priv->np.down = false;
napi_enable(&percpu_priv->np.napi);
}
}
@@ -2554,7 +2558,7 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 1;
+ percpu_priv->np.down = true;
napi_disable(&percpu_priv->np.napi);
}
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 52be6e315752..01089c30b462 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -47,40 +47,6 @@ drop_packet_err:
return NETDEV_TX_OK;
}
-static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
-{
- int l3_start, l3_hsize;
- u16 l3_flags, l4_flags;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return false;
-
- switch (skb->csum_offset) {
- case offsetof(struct tcphdr, check):
- l4_flags = ENETC_TXBD_L4_TCP;
- break;
- case offsetof(struct udphdr, check):
- l4_flags = ENETC_TXBD_L4_UDP;
- break;
- default:
- skb_checksum_help(skb);
- return false;
- }
-
- l3_start = skb_network_offset(skb);
- l3_hsize = skb_network_header_len(skb);
-
- l3_flags = 0;
- if (skb->protocol == htons(ETH_P_IPV6))
- l3_flags = ENETC_TXBD_L3_IPV6;
-
- /* write BD fields */
- txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
- txbd->l4_csoff = l4_flags;
-
- return true;
-}
-
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
@@ -146,22 +112,16 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
- if (enetc_tx_csum(skb, &temp_bd))
- flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
- else if (tx_ring->tsd_enable)
+ if (tx_ring->tsd_enable)
flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
- if (flags & ENETC_TXBD_FLAGS_TSE) {
- u32 temp;
-
- temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
- | (flags << ENETC_TXBD_FLAGS_OFFSET);
- temp_bd.txstart = cpu_to_le32(temp);
- }
+ if (flags & ENETC_TXBD_FLAGS_TSE)
+ temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
+ flags);
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
@@ -1897,8 +1857,7 @@ static void enetc_kfree_si(struct enetc_si *si)
static void enetc_detect_errata(struct enetc_si *si)
{
if (si->pdev->revision == ENETC_REV1)
- si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
- ENETC_ERR_UCMCSWP;
+ si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dd0fb0c066d7..8532d23b54f5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -147,9 +147,8 @@ struct enetc_msg_swbd {
#define ENETC_REV1 0x1
enum enetc_errata {
- ENETC_ERR_TXCSUM = BIT(0),
- ENETC_ERR_VLAN_ISOL = BIT(1),
- ENETC_ERR_UCMCSWP = BIT(2),
+ ENETC_ERR_VLAN_ISOL = BIT(0),
+ ENETC_ERR_UCMCSWP = BIT(1),
};
#define ENETC_SI_F_QBV BIT(0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 17cf7c94fdb5..68ef4f959982 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -374,8 +374,7 @@ union enetc_tx_bd {
__le16 frm_len;
union {
struct {
- __le16 l3_csoff;
- u8 l4_csoff;
+ u8 reserved[3];
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -398,41 +397,37 @@ union enetc_tx_bd {
} wb; /* writeback descriptor */
};
-#define ENETC_TXBD_FLAGS_L4CS BIT(0)
-#define ENETC_TXBD_FLAGS_TSE BIT(1)
-#define ENETC_TXBD_FLAGS_W BIT(2)
-#define ENETC_TXBD_FLAGS_CSUM BIT(3)
-#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
-#define ENETC_TXBD_FLAGS_EX BIT(6)
-#define ENETC_TXBD_FLAGS_F BIT(7)
+enum enetc_txbd_flags {
+ ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_W = BIT(2),
+ ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_TXSTART = BIT(4),
+ ENETC_TXBD_FLAGS_EX = BIT(6),
+ ENETC_TXBD_FLAGS_F = BIT(7)
+};
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+
+static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
+{
+ u32 temp;
+
+ temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) |
+ (flags << ENETC_TXBD_FLAGS_OFFSET);
+
+ return cpu_to_le32(temp);
+}
+
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
}
-/* L3 csum flags */
-#define ENETC_TXBD_L3_IPCS BIT(7)
-#define ENETC_TXBD_L3_IPV6 BIT(15)
-
-#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
-#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
-
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
-static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
-{
- return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
- (start & ENETC_TXBD_L3_START_MASK));
-}
-
-/* L4 csum flags */
-#define ENETC_TXBD_L4_UDP BIT(5)
-#define ENETC_TXBD_L4_TCP BIT(6)
-
union enetc_rx_bd {
struct {
__le64 addr;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 419306342ac5..ecdc2af8c292 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -714,22 +714,16 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
ndev->priv_flags |= IFF_UNICAST_FLT;
if (si->hw_features & ENETC_SI_F_QBV)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 7b5c82c7e4e5..39c1a09e69a9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -120,22 +120,16 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index af4dfbe28d56..f4167de30461 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1827,86 +1827,6 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
}
/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- return rc;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter, true);
- if (rc)
- return IBMVNIC_INIT_FAILED;
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- return 0;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = reset_state;
- return rc;
- }
-
- rc = init_resources(adapter);
- if (rc)
- return rc;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc)
- return IBMVNIC_OPEN_FAILED;
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
- return 0;
-}
-
-/**
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -1921,10 +1841,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
rwi->reset_reason);
- rtnl_lock();
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -1936,25 +1858,37 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state != VNIC_CLOSING) {
+ rc = -1;
+ goto out;
+ }
+
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -1963,7 +1897,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -2003,7 +1939,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2064,7 +2004,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = 0;
out:
- rtnl_unlock();
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
return rc;
}
@@ -2198,10 +2140,7 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
+ if (adapter->force_reset_recovery) {
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
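The ibmvnic change above folds the old do_change_param_reset() path into do_reset(); the only behavioural difference that remains is lock ownership, because the CHANGE_PARAM requestor enters with rtnl_lock() already held. A minimal sketch of that conditional-locking pattern, with an illustrative function name that is not part of the driver:

        #include <linux/rtnetlink.h>

        /* Illustrative only: take and release RTNL only when the caller does
         * not already own it, mirroring the VNIC_RESET_CHANGE_PARAM handling
         * above.
         */
        static void reset_under_rtnl(bool caller_holds_rtnl)
        {
                if (!caller_holds_rtnl)
                        rtnl_lock();

                /* ... work that must run with RTNL held ... */

                if (!caller_holds_rtnl)
                        rtnl_unlock();
        }
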
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index fc765e86988e..9f3d6715748e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1239,7 +1239,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
sq = &qset->sq[qidx];
sq->sqb_count = 0;
- sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
if (!sq->sqb_ptrs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 39eff6a57ba2..73aab72877fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -8245,6 +8245,86 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
+/* Note that the XRALXX register positions violate the rule of ordering
+ * register definitions by ID. However, the XRALXX pack helpers use the
+ * RALXX pack helpers, and the RALXX registers have higher IDs.
+ */
+
+/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
+ * -----------------------------------------------------------
+ * The XRALTA is used to allocate the XLT LPM trees.
+ *
+ * This register embeds original RALTA register.
+ */
+#define MLXSW_REG_XRALTA_ID 0x7811
+#define MLXSW_REG_XRALTA_LEN 0x08
+#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
+
+static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
+
+ MLXSW_REG_ZERO(xralta, payload);
+ mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
+}
+
+/* XRALST - XM Router Algorithmic LPM Structure Tree Register
+ * ----------------------------------------------------------
+ * The XRALST is used to set and query the structure of an XLT LPM tree.
+ *
+ * This register embeds original RALST register.
+ */
+#define MLXSW_REG_XRALST_ID 0x7812
+#define MLXSW_REG_XRALST_LEN 0x108
+#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
+
+static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ MLXSW_REG_ZERO(xralst, payload);
+ mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
+}
+
+static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
+ right_child_bin);
+}
+
+/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
+ * --------------------------------------------------------
+ * The XRALTB register is used to bind virtual router and protocol
+ * to an allocated LPM tree.
+ *
+ * This register embeds original RALTB register.
+ */
+#define MLXSW_REG_XRALTB_ID 0x7813
+#define MLXSW_REG_XRALTB_LEN 0x08
+#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
+
+static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
+
+ MLXSW_REG_ZERO(xraltb, payload);
+ mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+}
+
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -11195,6 +11275,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
+ MLXSW_REG(xralta),
+ MLXSW_REG(xralst),
+ MLXSW_REG(xraltb),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
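Each XRALxx payload defined above embeds the legacy RALxx payload at offset 0x04, so a single pack can serve both register families. A hedged sketch of that reuse (the helper name and the choice of IPv4 are illustrative; the register helpers are the ones defined above):

        static int lpm_tree_alloc_write(struct mlxsw_core *core, bool use_xm,
                                        u8 tree_id)
        {
                char xralta_pl[MLXSW_REG_XRALTA_LEN];

                mlxsw_reg_xralta_pack(xralta_pl, true,
                                      MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);

                /* XM path: write the full XRALTA payload */
                if (use_xm)
                        return mlxsw_reg_write(core, MLXSW_REG(xralta),
                                               xralta_pl);

                /* Legacy path: write only the embedded RALTA portion */
                return mlxsw_reg_write(core, MLXSW_REG(ralta),
                                       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
        }
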
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4381f8c6c3fb..29fc47821ad7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -409,6 +409,7 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
+ const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -422,12 +423,31 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
+static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
+ xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
+ xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
+}
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
@@ -443,6 +463,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
+ fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -481,33 +502,36 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -515,19 +539,20 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -538,12 +563,11 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
- lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -554,14 +578,15 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -569,6 +594,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -582,7 +608,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -593,8 +619,11 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops =
+ mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
+
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -684,23 +713,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -5659,28 +5688,28 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_ralxx_protocol proto,
u8 tree_id)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
int i, err;
- mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true, proto, tree_id);
+ err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
if (err)
return err;
- mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
+ err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
if (err)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, proto, tree_id);
+ err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
if (err)
return err;
@@ -8057,6 +8086,12 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
+static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
+ .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
+ .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
+ .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -8070,6 +8105,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 8418dc3ae967..c5c7346eb815 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -48,6 +48,17 @@ struct mlxsw_sp_router {
bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
+ /* One set of ops for each protocol: IPv4 and IPv6 */
+ const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
+};
+
+/* Low-level router ops. These handle the different register sets used
+ * to work with ordinary and XM trees and FIB entries.
+ */
+struct mlxsw_sp_router_ll_ops {
+ int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
+ int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
+ int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
};
struct mlxsw_sp_rif_ipip_lb;
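The proto_ll_ops indirection above only gains a second user once an XM-specific implementation exists; this patch wires up the basic (legacy-register) ops for both protocols. Purely as a hypothetical sketch, not part of this patch, an XM ops table could look like:

        /* Hypothetical: write the full XRALTA payload instead of the
         * embedded RALTA.
         */
        static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp,
                                                     char *xralta_pl)
        {
                return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta),
                                       xralta_pl);
        }

        static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
                .ralta_write    = mlxsw_sp_router_ll_xm_ralta_write,
                /* .ralst_write and .raltb_write would follow the same pattern */
        };
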
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index dcde496da7fb..c5de8f46cdd3 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- phy_ethtool_get_wol(netdev->phydev, wol);
+
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- phy_ethtool_set_wol(netdev->phydev, wol);
-
- return 0;
+ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+ : -ENETDOWN;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7e85cf943be1..8724d6a9ed02 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1043,8 +1043,7 @@ static int using_multi_irqs(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
return 0;
else
return 1;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 319399a03cb5..7e0947e29d51 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -67,7 +67,7 @@
#define R8169_REGS_SIZE 256
#define R8169_RX_BUF_SIZE (SZ_16K - 1)
-#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -4690,6 +4690,7 @@ static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ unsigned long irqflags;
int retval = -ENOMEM;
pm_runtime_get_sync(&pdev->dev);
@@ -4714,8 +4715,9 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
+ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_SHARED, dev->name, tp);
+ irqflags, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
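The request_irq() change above relies on MSI vectors never being shared: with MSI enabled the handler is registered with IRQF_NO_THREAD, so it is not force-threaded when threadirqs is in effect, while legacy INTx keeps IRQF_SHARED because the interrupt line may be shared with other devices.
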
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index cc9ac572423e..e9b9614639cd 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -22,10 +22,6 @@
#include <linux/bitrev.h>
#include <linux/pci.h>
-#ifndef lint
-static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
-#endif
-
/*
* PCM active state
*/
diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index 15c503f43727..2f5f5f26bb43 100644
--- a/drivers/net/fddi/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
@@ -40,10 +40,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
@@ -147,10 +143,11 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
/* For AIX event notification: */
/* Is a disconnect command remotely issued ? */
if (cmd == EC_DISCONNECT &&
- smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
+ smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) {
AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
smt_get_error_word(smc) );
+ }
/*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
if (cmd == EC_CONNECT) {
diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index afd5ca39f43b..35110c0c00a0 100644
--- a/drivers/net/fddi/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
@@ -40,7 +40,6 @@
#ifdef ESS
#ifndef lint
-static const char ID_sccs[] = "@(#)ess.c 1.10 96/02/23 (C) SK" ;
#define LINT_USE(x)
#else
#define LINT_USE(x) (x)=(x)
diff --git a/drivers/net/fddi/skfp/hwt.c b/drivers/net/fddi/skfp/hwt.c
index 32804ed049cd..5577b8e14b73 100644
--- a/drivers/net/fddi/skfp/hwt.c
+++ b/drivers/net/fddi/skfp/hwt.c
@@ -27,10 +27,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)hwt.c 1.13 97/04/23 (C) SK " ;
-#endif
-
/*
* Prototypes of local functions.
*/
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 554cde8d6073..90e8df6d9a88 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
-#endif
-
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 14f10b4cab0f..563fb7f0b327 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -24,10 +24,6 @@
#ifndef SLIM_SMT
-#ifndef lint
-static const char ID_sccs[] = "@(#)pmf.c 1.37 97/08/04 (C) SK " ;
-#endif
-
static int smt_authorize(struct s_smc *smc, struct smt_header *sm);
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm);
static const struct s_p_tab* smt_get_ptab(u_short para);
diff --git a/drivers/net/fddi/skfp/queue.c b/drivers/net/fddi/skfp/queue.c
index ba022f723bd7..abe155ad777f 100644
--- a/drivers/net/fddi/skfp/queue.c
+++ b/drivers/net/fddi/skfp/queue.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
-#endif
-
#define PRINTF(a,b,c)
/*
diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index c0e62c25332c..37a89675dbeb 100644
--- a/drivers/net/fddi/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
diff --git a/drivers/net/fddi/skfp/smtdef.c b/drivers/net/fddi/skfp/smtdef.c
index 0bebde3c6cb9..99cc9a549bd7 100644
--- a/drivers/net/fddi/skfp/smtdef.c
+++ b/drivers/net/fddi/skfp/smtdef.c
@@ -22,10 +22,6 @@
#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
#endif
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ;
-#endif
-
/*
* defaults
*/
diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index 01f6c75cbea8..c9898c83fe30 100644
--- a/drivers/net/fddi/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
@@ -19,10 +19,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtinit.c 1.15 97/05/06 (C) SK " ;
-#endif
-
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
/* define global debug variable */
diff --git a/drivers/net/fddi/skfp/smttimer.c b/drivers/net/fddi/skfp/smttimer.c
index 9d549bb14f07..5f3e5d7bf415 100644
--- a/drivers/net/fddi/skfp/smttimer.c
+++ b/drivers/net/fddi/skfp/smttimer.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smttimer.c 2.4 97/08/04 (C) SK " ;
-#endif
-
static void timer_done(struct s_smc *smc, int restart);
void smt_timer_init(struct s_smc *smc)
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f98d060b0f5b..4cad68c3f49b 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -26,11 +26,6 @@
#ifndef SLIM_SMT
#ifndef BOOT
-#ifndef lint
-static const char ID_sccs[] = "@(#)srf.c 1.18 97/08/04 (C) SK " ;
-#endif
-
-
/*
* function declarations
*/
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 4eb64709d44c..3a2824f24caa 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -316,6 +316,7 @@ struct cas_control {
* struct ca8210_test - ca8210 test interface structure
* @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device
* @up_fifo: fifo for upstream messages
+ * @readq: read wait queue
*
* This structure stores all the data pertaining to the debug interface
*/
@@ -346,12 +347,12 @@ struct ca8210_test {
* @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms
* @sync_down: counts number of downstream synchronous commands
* @sync_up: counts number of upstream synchronous commands
- * @spi_transfer_complete completion object for a single spi_transfer
- * @sync_exchange_complete completion object for a complete synchronous API
- * exchange
- * @promiscuous whether the ca8210 is in promiscuous mode or not
+ * @spi_transfer_complete: completion object for a single spi_transfer
+ * @sync_exchange_complete: completion object for a complete synchronous API
+ * exchange
+ * @promiscuous: whether the ca8210 is in promiscuous mode or not
* @retries: records how many times the current pending spi
- * transfer has been retried
+ * transfer has been retried
*/
struct ca8210_priv {
struct spi_device *spi;
@@ -420,8 +421,8 @@ struct fulladdr {
/**
* union macaddr: generic MAC address container
- * @short_addr: 16-bit short address
- * @ieee_address: 64-bit extended address as LE byte array
+ * @short_address: 16-bit short address
+ * @ieee_address: 64-bit extended address as LE byte array
*
*/
union macaddr {
@@ -714,7 +715,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
/**
* ca8210_rx_done() - Calls various message dispatches responding to a received
* command
- * @arg: Pointer to the cas_control object for the relevant spi transfer
+ * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer
*
* Presents a received SAP command from the ca8210 to the Cascoda EVBME, test
* interface and network driver.
@@ -1277,7 +1278,6 @@ static u8 tdme_channelinit(u8 channel, void *device_ref)
* @pib_attribute: Attribute Number
* @pib_attribute_length: Attribute length
* @pib_attribute_value: Pointer to Attribute Value
- * @device_ref: Nondescript pointer to target device
*
* Return: 802.15.4 status code of checks
*/
@@ -3046,7 +3046,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
/**
* ca8210_remove() - Shut down a ca8210 upon being disconnected
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
@@ -3096,7 +3096,7 @@ static int ca8210_remove(struct spi_device *spi_device)
/**
* ca8210_probe() - Set up a connected ca8210 upon being detected by the system
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 7fe306e76281..fa63d4dee0ba 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -187,8 +187,7 @@ static const struct net_device_ops ifb_netdev_ops = {
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
- NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 6bfac1efe037..12a2001ee1e9 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -21,6 +21,7 @@
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_version.h"
/**
* DOC: The IPA Generic Software Interface
@@ -742,11 +743,12 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* Enable the doorbell engine if requested */
- if (doorbell)
+ /* We enable the doorbell engine for IPA v3.5.1 */
+ if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
val |= USE_DB_ENG_FMASK;
- if (!channel->use_prefetch)
+ /* Starting with IPA v4.0 the command channel uses the escape buffer */
+ if (gsi->version != IPA_VERSION_3_5_1 && channel->command)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -829,8 +831,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
return ret;
}
-/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
@@ -838,10 +840,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (legacy && !channel->toward_ipa)
+ if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -1452,8 +1454,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
}
/* Setup function for a single channel */
-static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool legacy)
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1472,7 +1473,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, true);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1549,7 +1550,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
}
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool legacy)
+static int gsi_channel_setup(struct gsi *gsi)
{
u32 channel_id = 0;
u32 mask;
@@ -1561,7 +1562,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, legacy);
+ ret = gsi_channel_setup_one(gsi, channel_id);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1647,7 +1648,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool legacy)
+int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
@@ -1691,7 +1692,7 @@ int gsi_setup(struct gsi *gsi, bool legacy)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, legacy);
+ return gsi_channel_setup(gsi);
}
/* Inverse of gsi_setup() */
@@ -1814,7 +1815,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data,
- bool command, bool prefetch)
+ bool command)
{
struct gsi_channel *channel;
u32 tre_count;
@@ -1838,7 +1839,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->use_prefetch = command && prefetch;
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -1892,13 +1892,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel)
}
/* Init function for channels */
-static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
- const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
+ bool modem_alloc;
int ret = 0;
u32 i;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = gsi->version == IPA_VERSION_4_2;
+
gsi_evt_ring_init(gsi);
/* The endpoint data array is indexed by endpoint name */
@@ -1916,7 +1919,7 @@ static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
continue;
}
- ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ ret = gsi_channel_init_one(gsi, &data[i], command);
if (ret)
goto err_unwind;
}
@@ -1952,9 +1955,9 @@ static void gsi_channel_exit(struct gsi *gsi)
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -1965,6 +1968,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
gsi_validate_build();
gsi->dev = dev;
+ gsi->version = version;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -2008,7 +2012,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
goto err_free_irq;
}
- ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ ret = gsi_channel_init(gsi, count, data);
if (ret)
goto err_iounmap;
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 3f9f29d531c4..59ace83d404c 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include "ipa_version.h"
+
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 17
#define GSI_EVT_RING_COUNT_MAX 13
@@ -107,7 +109,6 @@ struct gsi_channel {
struct gsi *gsi;
bool toward_ipa;
bool command; /* AP command TX channel or not */
- bool use_prefetch; /* use prefetch (else escape buf) */
u8 tlv_count; /* # entries in TLV FIFO */
u16 tre_count;
@@ -147,6 +148,7 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
@@ -164,14 +166,13 @@ struct gsi {
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @legacy: Set up for legacy hardware
*
* Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool legacy);
+int gsi_setup(struct gsi *gsi);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -219,15 +220,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @legacy: Legacy behavior
+ * @doorbell: Whether to (possibly) enable the doorbell engine
*
- * Reset a channel and reconfigure it. The @legacy flag indicates
- * that some steps should be done differently for legacy hardware.
+ * Reset a channel and reconfigure it. The @doorbell flag indicates
+ * that the doorbell engine should be enabled if needed.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
@@ -236,15 +237,18 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
* gsi_init() - Initialize the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
* @pdev: IPA platform device
+ * @version: IPA hardware version (implies GSI version)
+ * @count: Number of entries in the configuration data array
+ * @data: Endpoint and channel configuration data
*
* Return: 0 if successful, or a negative error code
*
* Early stage initialization of the GSI subsystem, performing tasks
* that can be done before the GSI hardware is ready to use.
*/
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc);
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
/**
* gsi_exit() - Exit the GSI subsystem
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 7386e10615d9..548121b1531b 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1217,7 +1217,6 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
- bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1279,8 +1278,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, legacy);
+ gsi_channel_reset(gsi, endpoint->channel_id, true);
msleep(1);
@@ -1303,21 +1301,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
bool special;
- bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
- *
- * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ special = ipa->version == IPA_VERSION_3_5_1 &&
+ !endpoint->toward_ipa &&
+ endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, legacy);
+ gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(&ipa->pdev->dev,
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index f4dd14d9550f..a580cab794b1 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -111,8 +111,7 @@ int ipa_setup(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
- /* Setup for IPA v3.5.1 has some slight differences */
- ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ ret = gsi_setup(&ipa->gsi);
if (ret)
return ret;
@@ -723,10 +722,8 @@ static int ipa_probe(struct platform_device *pdev)
const struct ipa_data *data;
struct ipa_clock *clock;
struct rproc *rproc;
- bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- bool prefetch;
phandle ph;
int ret;
@@ -788,13 +785,8 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
- /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
- prefetch = ipa->version != IPA_VERSION_3_5_1;
- /* IPA v4.2 requires the AP to allocate channels for the modem */
- modem_alloc = ipa->version == IPA_VERSION_4_2;
-
- ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
- data->endpoint_data, modem_alloc);
+ ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
+ data->endpoint_data);
if (ret)
goto err_mem_exit;
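Taken together, the GSI/IPA changes above replace the legacy, prefetch and modem_alloc booleans previously threaded through gsi_setup(), gsi_init() and gsi_channel_reset() with a single enum ipa_version passed once to gsi_init(); the GSI layer now derives each quirk from gsi->version where it is needed (doorbell engine only on IPA v3.5.1, escape buffer for command channels from v4.0 on, AP-allocated modem channels on v4.2).
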
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 11ca5fa902a1..92425e1fd70c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -101,6 +101,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c8d803d3616c..dd960209da94 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1339,7 +1339,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-/**
+/*
* reconfigure list of remote source mac address
* (only for macvlan devices in source mode)
* Note regarding alignment: all netlink data is aligned to 4 Byte, which
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
new file mode 100644
index 000000000000..d3f92781314e
--- /dev/null
+++ b/drivers/net/mhi_net.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI Network driver - Network over MHI bus
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+#define MHI_NET_DEFAULT_MTU 0x4000
+
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ atomic_t rx_queued;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+};
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ /* Feed the rx buffer pool */
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+ cancel_delayed_work_sync(&mhi_netdev->rx_refill);
+
+ return 0;
+}
+
+static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int err;
+
+ err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ /* drop the packet */
+ dev_kfree_skb_any(skb);
+ }
+
+ if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
+ stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_netdev_ops;
+ ndev->mtu = MHI_NET_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int remaining;
+
+ remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ dev_kfree_skb_any(skb);
+
+ /* MHI layer stopping/resetting the DL channel */
+ if (mhi_res->transaction_status == -ENOTCONN)
+ return;
+
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ } else {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb_put(skb, mhi_res->bytes_xferd);
+ netif_rx(skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+}
+
+static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct sk_buff *skb = mhi_res->buf_addr;
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ dev_consume_skb_any(skb);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+
+ /* MHI layer stopping/resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+ return;
+ }
+
+ u64_stats_inc(&mhi_netdev->stats.tx_errors);
+ } else {
+ u64_stats_inc(&mhi_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
+ rx_refill.work);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int size = READ_ONCE(ndev->mtu);
+ struct sk_buff *skb;
+ int err;
+
+ while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+ ndev->name, err);
+ kfree_skb(skb);
+ break;
+ }
+
+ atomic_inc(&mhi_netdev->stats.rx_queued);
+
+ /* Do not hog the CPU if rx buffers are consumed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ }
+
+ /* If we're still starved of rx buffers, reschedule later */
+ if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
+}
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const char *netname = (char *)id->driver_data;
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_net_dev *mhi_netdev;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
+ mhi_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ mhi_netdev = netdev_priv(ndev);
+ dev_set_drvdata(dev, mhi_netdev);
+ mhi_netdev->ndev = ndev;
+ mhi_netdev->mdev = mhi_dev;
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ /* All MHI net channels have 128 ring elements (at least for now) */
+ mhi_netdev->rx_queue_sz = 128;
+
+ INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
+ u64_stats_init(&mhi_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_netdev->stats.tx_syncp);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev);
+ if (err)
+ goto out_err;
+
+ err = register_netdev(ndev);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ free_netdev(ndev);
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ unregister_netdev(mhi_netdev->ndev);
+
+ mhi_unprepare_from_transfer(mhi_netdev->mdev);
+
+ free_netdev(mhi_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_net_id_table[] = {
+ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
+
+static struct mhi_driver mhi_net_driver = {
+ .probe = mhi_net_probe,
+ .remove = mhi_net_remove,
+ .dl_xfer_cb = mhi_net_dl_callback,
+ .ul_xfer_cb = mhi_net_ul_callback,
+ .id_table = mhi_net_id_table,
+ .driver = {
+ .name = "mhi_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_net_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network over MHI");
+MODULE_LICENSE("GPL v2");
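For concreteness, with the fixed rx_queue_sz of 128 the DL completion callback reschedules the refill work as soon as 64 or fewer receive buffers remain queued (remaining <= rx_queue_sz / 2), and the refill worker reschedules itself after HZ / 2 only if the receive queue is still completely empty when its loop exits. On the TX side the netdev queue is stopped whenever mhi_queue_is_full() reports a full UL ring and is woken again from the UL completion callback.
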
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index fb182bec8f06..2a4892402ed8 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -697,7 +697,7 @@ static struct failover_ops net_failover_ops = {
/**
* net_failover_create - Create and register a failover instance
*
- * @dev: standby netdev
+ * @standby_dev: standby netdev
*
* Creates a failover netdev and registers a failover instance for a standby
* netdev. Used by paravirtual drivers that use 3-netdev model.
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 92001f7af380..ccecba908ded 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -83,6 +83,7 @@ static struct console netconsole_ext;
* whether the corresponding netpoll is active or inactive.
* Also, other parameters of a target may be modified at
* runtime only when it is disabled (enabled == 0).
+ * @extended: Denotes whether console is extended or not.
* @np: The netpoll structure for this target.
* Contains the other userspace visible parameters:
* dev_name (read-write)
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 307f0ac1287b..3727b38addf7 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/ethtool_netlink.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -23,6 +24,7 @@
#define ADIN1300_PHY_CTRL1 0x0012
#define ADIN1300_AUTO_MDI_EN BIT(10)
#define ADIN1300_MAN_MDIX_EN BIT(9)
+#define ADIN1300_DIAG_CLK_EN BIT(2)
#define ADIN1300_RX_ERR_CNT 0x0014
@@ -69,6 +71,31 @@
#define ADIN1300_CLOCK_STOP_REG 0x9400
#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+#define ADIN1300_CDIAG_RUN 0xba1b
+#define ADIN1300_CDIAG_RUN_EN BIT(0)
+
+/*
+ * The XSIM3/2/1 and XSHRT3/2/1 are actually relative.
+ * For CDIAG_DTLD_RSLTS(0) it's ADIN1300_CDIAG_RSLT_XSIM3/2/1
+ * For CDIAG_DTLD_RSLTS(1) it's ADIN1300_CDIAG_RSLT_XSIM3/2/0
+ * For CDIAG_DTLD_RSLTS(2) it's ADIN1300_CDIAG_RSLT_XSIM3/1/0
+ * For CDIAG_DTLD_RSLTS(3) it's ADIN1300_CDIAG_RSLT_XSIM2/1/0
+ */
+#define ADIN1300_CDIAG_DTLD_RSLTS(x) (0xba1d + (x))
+#define ADIN1300_CDIAG_RSLT_BUSY BIT(10)
+#define ADIN1300_CDIAG_RSLT_XSIM3 BIT(9)
+#define ADIN1300_CDIAG_RSLT_XSIM2 BIT(8)
+#define ADIN1300_CDIAG_RSLT_XSIM1 BIT(7)
+#define ADIN1300_CDIAG_RSLT_SIM BIT(6)
+#define ADIN1300_CDIAG_RSLT_XSHRT3 BIT(5)
+#define ADIN1300_CDIAG_RSLT_XSHRT2 BIT(4)
+#define ADIN1300_CDIAG_RSLT_XSHRT1 BIT(3)
+#define ADIN1300_CDIAG_RSLT_SHRT BIT(2)
+#define ADIN1300_CDIAG_RSLT_OPEN BIT(1)
+#define ADIN1300_CDIAG_RSLT_GOOD BIT(0)
+
+#define ADIN1300_CDIAG_FLT_DIST(x) (0xba21 + (x))
+
#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
#define ADIN1300_GE_SOFT_RESET BIT(0)
@@ -321,10 +348,9 @@ static int adin_set_downshift(struct phy_device *phydev, u8 cnt)
return -E2BIG;
val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
- val |= ADIN1300_LINKING_EN;
rc = phy_modify(phydev, ADIN1300_PHY_CTRL3,
- ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK,
+ ADIN1300_DOWNSPEED_RETRIES_MSK,
val);
if (rc < 0)
return rc;
@@ -555,6 +581,14 @@ static int adin_config_aneg(struct phy_device *phydev)
{
int ret;
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
ret = adin_config_mdix(phydev);
if (ret)
return ret;
@@ -725,10 +759,117 @@ static int adin_probe(struct phy_device *phydev)
return 0;
}
+static int adin_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* wait a bit for the clock to stabilize */
+ msleep(50);
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN,
+ ADIN1300_CDIAG_RUN_EN);
+}
+
+static int adin_cable_test_report_trans(int result)
+{
+ int mask;
+
+ if (result & ADIN1300_CDIAG_RSLT_GOOD)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ if (result & ADIN1300_CDIAG_RSLT_OPEN)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+
+ /* short with other pairs */
+ mask = ADIN1300_CDIAG_RSLT_XSHRT3 |
+ ADIN1300_CDIAG_RSLT_XSHRT2 |
+ ADIN1300_CDIAG_RSLT_XSHRT1;
+ if (result & mask)
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+
+ if (result & ADIN1300_CDIAG_RSLT_SHRT)
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+}
+
+static int adin_cable_test_report_pair(struct phy_device *phydev,
+ unsigned int pair)
+{
+ int fault_rslt;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_DTLD_RSLTS(pair));
+ if (ret < 0)
+ return ret;
+
+ fault_rslt = adin_cable_test_report_trans(ret);
+
+ ret = ethnl_cable_test_result(phydev, pair, fault_rslt);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_FLT_DIST(pair));
+ if (ret < 0)
+ return ret;
+
+ switch (fault_rslt) {
+ case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
+ case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
+ case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
+ return ethnl_cable_test_fault_length(phydev, pair, ret * 100);
+ default:
+ return 0;
+ }
+}
+
+static int adin_cable_test_report(struct phy_device *phydev)
+{
+ unsigned int pair;
+ int ret;
+
+ for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) {
+ ret = adin_cable_test_report_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adin_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ADIN1300_CDIAG_RUN_EN)
+ return 0;
+
+ *finished = true;
+
+ return adin_cable_test_report(phydev);
+}
+
static struct phy_driver adin_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200),
.name = "ADIN1200",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -745,10 +886,13 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300),
.name = "ADIN1300",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -765,6 +909,8 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
};
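With PHY_POLL_CABLE_TEST set and the two callbacks above wired in, the diagnostic can be driven from user space with: ethtool --cable-test <iface>. Note that ethnl_cable_test_fault_length() expects centimetres, which is why the raw CDIAG_FLT_DIST value is scaled by 100 (suggesting the PHY reports the fault distance in metres).
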
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 41e7c1432497..345f70f9d39b 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -52,6 +52,7 @@
#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
+#define MDIO_AN_TX_VEND_INT_STATUS2_MASK BIT(0)
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
@@ -246,6 +247,13 @@ static int aqr_config_intr(struct phy_device *phydev)
bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
+ if (en) {
+ /* Clear any pending interrupts before enabling them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
if (err < 0)
@@ -256,18 +264,39 @@ static int aqr_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
- en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ if (err < 0)
+ return err;
+
+ if (!en) {
+ /* Clear any pending interrupts after we have disabled them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
-static int aqr_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_STATUS2);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- reg = phy_read_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_STATUS2);
- return (reg < 0) ? reg : 0;
+ return IRQ_HANDLED;
}
static int aqr_read_status(struct phy_device *phydev)
@@ -584,7 +613,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ1202",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -592,7 +621,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ2104",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -600,7 +629,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR105",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
.suspend = aqr107_suspend,
.resume = aqr107_resume,
@@ -610,7 +639,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR106",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -620,7 +649,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -638,7 +667,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -654,7 +683,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
};
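The other recurring change, first visible in aqr_config_intr() above and repeated in each driver below, is that .config_intr now clears stale events itself: ack before unmasking on enable, and mask before the final ack on disable, so nothing latched can fire outside the enabled window. A sketch of that ordering, again with hypothetical foo_/FOO_ names and a foo_ack_interrupt() helper assumed to return 0 or a negative errno:

static int foo_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* Ack anything already latched before opening the mask. */
		err = foo_ack_interrupt(phydev);
		if (err)
			return err;

		err = phy_write(phydev, FOO_INTR_ENABLE, FOO_INTR_SOURCES);
	} else {
		/* Mask first, then ack, so nothing re-latches in between. */
		err = phy_write(phydev, FOO_INTR_ENABLE, 0);
		if (err)
			return err;

		err = foo_ack_interrupt(phydev);
	}

	return err;
}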
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ed601a7e46a0..d0b36fd6c265 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -614,6 +614,11 @@ static int at803x_config_intr(struct phy_device *phydev)
value = phy_read(phydev, AT803X_INTR_ENABLE);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
@@ -621,13 +626,44 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- }
- else
+ } else {
err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static void at803x_link_change_notify(struct phy_device *phydev)
{
/*
@@ -1062,8 +1098,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1082,8 +1118,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
}, {
/* Qualcomm Atheros AR8031/AR8033 */
PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
@@ -1100,8 +1136,8 @@ static struct phy_driver at803x_driver[] = {
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
.aneg_done = at803x_aneg_done,
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1120,8 +1156,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
}, {
@@ -1132,8 +1168,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
.read_status = at803x_read_status,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 9ccf28b0a04d..da8f7cb41b44 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -256,8 +256,8 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.name = "Broadcom Cygnus PHY",
/* PHY_GBIT_FEATURES */
.config_init = bcm_cygnus_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
}, {
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ef6825b30323..53282a6d5928 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -181,21 +181,62 @@ EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);
int bcm_phy_config_intr(struct phy_device *phydev)
{
- int reg;
+ int reg, err;
reg = phy_read(phydev, MII_BCM54XX_ECR);
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM54XX_ECR_IM;
- else
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ } else {
reg |= MII_BCM54XX_ECR_IM;
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ if (err)
+ return err;
- return phy_write(phydev, MII_BCM54XX_ECR, reg);
+ err = bcm_phy_ack_intr(phydev);
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(bcm_phy_config_intr);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_mask;
+
+ irq_status = phy_read(phydev, MII_BCM54XX_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* If a bit from the Interrupt Mask register is set, the corresponding
+ * bit from the Interrupt Status register is masked. So read the IMR
+ * and then flip the bits to get the list of possible interrupt
+ * sources.
+ */
+ irq_mask = phy_read(phydev, MII_BCM54XX_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt);
+
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
{
phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 237a8503c9b4..c3842f87c33b 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -63,6 +63,7 @@ int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
int bcm_phy_ack_intr(struct phy_device *phydev);
int bcm_phy_config_intr(struct phy_device *phydev);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev);
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 8998e68bb26b..d8f3024860dc 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -637,13 +637,29 @@ static int bcm54140_config_init(struct phy_device *phydev)
BCM54140_RDB_C_PWR_ISOLATE, 0);
}
-static int bcm54140_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm54140_handle_interrupt(struct phy_device *phydev)
{
- int ret;
+ int irq_status, irq_mask;
+
+ irq_status = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_mask = bcm_phy_read_rdb(phydev, BCM54140_RDB_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
- ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ phy_trigger_machine(phydev);
- return (ret < 0) ? 0 : ret;
+ return IRQ_HANDLED;
}
static int bcm54140_ack_intr(struct phy_device *phydev)
@@ -665,7 +681,7 @@ static int bcm54140_config_intr(struct phy_device *phydev)
BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
};
- int reg;
+ int reg, err;
if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
return -EINVAL;
@@ -674,12 +690,23 @@ static int bcm54140_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm54140_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~port_to_imr_bit[priv->port];
- else
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ } else {
reg |= port_to_imr_bit[priv->port];
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ if (err)
+ return err;
+
+ err = bcm54140_ack_intr(phydev);
+ }
- return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ return err;
}
static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
@@ -834,8 +861,7 @@ static struct phy_driver bcm54140_drivers[] = {
.flags = PHY_POLL_CABLE_TEST,
.features = PHY_GBIT_FEATURES,
.config_init = bcm54140_config_init,
- .did_interrupt = bcm54140_did_interrupt,
- .ack_interrupt = bcm54140_ack_intr,
+ .handle_interrupt = bcm54140_handle_interrupt,
.config_intr = bcm54140_config_intr,
.probe = bcm54140_probe,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 459fb2069c7e..0eb33be824f1 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -25,12 +25,22 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM63XX_IR_GMASK;
- else
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ } else {
reg |= MII_BCM63XX_IR_GMASK;
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ if (err)
+ return err;
+
+ err = bcm_phy_ack_intr(phydev);
+ }
- err = phy_write(phydev, MII_BCM63XX_IR, reg);
return err;
}
@@ -67,8 +77,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -77,8 +87,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index df360e1c5069..4ac8fd190e9d 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -144,35 +144,41 @@ static int bcm87xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (err)
+ return err;
+
reg |= 1;
- else
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ } else {
reg &= ~1;
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ }
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
return err;
}
-static int bcm87xx_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
- reg = phy_read(phydev, BCM87XX_LASI_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev,
- "Error: Read of BCM87XX_LASI_STATUS failed: %d\n",
- reg);
- return 0;
+ irq_status = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
}
- return (reg & 1) != 0;
-}
-static int bcm87xx_ack_interrupt(struct phy_device *phydev)
-{
- /* Reading the LASI status clears it. */
- bcm87xx_did_interrupt(phydev);
- return 0;
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int bcm8706_match_phy_device(struct phy_device *phydev)
@@ -194,9 +200,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8706_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
@@ -206,9 +211,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8727_match_phy_device,
} };
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cd271de9609b..8a4ec3222168 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -634,15 +634,43 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = brcm_fet_ack_interrupt(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BRCM_FET_IR_MASK;
- else
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ } else {
reg |= MII_BRCM_FET_IR_MASK;
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ if (err)
+ return err;
+
+ err = brcm_fet_ack_interrupt(phydev);
+ }
- err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
return err;
}
+static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_BRCM_FET_INTREG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
struct bcm53xx_phy_priv {
u64 *stats;
};
@@ -681,40 +709,40 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
@@ -722,8 +750,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.read_status = bcm54616s_read_status,
.probe = bcm54616s_probe,
}, {
@@ -732,8 +760,8 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -743,8 +771,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
@@ -752,8 +780,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -763,8 +791,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54811_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -774,48 +802,48 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
@@ -837,16 +865,16 @@ static struct phy_driver broadcom_drivers[] = {
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(broadcom_drivers);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 9d1612a4d7e6..ef5f412e101f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -87,15 +87,42 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = cis820x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
- else
+ } else {
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
+ if (err)
+ return err;
+
+ err = cis820x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t cis820x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_CIS8201_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_CIS8201_IMASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
/* Cicada 8201, a.k.a Vitesse VSC8201 */
static struct phy_driver cis820x_driver[] = {
{
@@ -104,16 +131,16 @@ static struct phy_driver cis820x_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
} };
module_phy_driver(cis820x_driver);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 942f277463a4..a3b3842c67e5 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -47,6 +47,10 @@
#define MII_DM9161_INTR_STOP \
(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
| MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+#define MII_DM9161_INTR_CHANGE \
+ (MII_DM9161_INTR_DPLX_CHANGE | \
+ MII_DM9161_INTR_SPD_CHANGE | \
+ MII_DM9161_INTR_LINK_CHANGE)
/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR 0x12
@@ -57,24 +61,58 @@ MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+static int dm9161_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DM9161_INTR);
+
+ return (err < 0) ? err : 0;
+}
+
#define DM9161_DELAY 1
static int dm9161_config_intr(struct phy_device *phydev)
{
- int temp;
+ int temp, err;
temp = phy_read(phydev, MII_DM9161_INTR);
if (temp < 0)
return temp;
- if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dm9161_ack_interrupt(phydev);
+ if (err)
+ return err;
+
temp &= ~(MII_DM9161_INTR_STOP);
- else
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ } else {
temp |= MII_DM9161_INTR_STOP;
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ if (err)
+ return err;
+
+ err = dm9161_ack_interrupt(phydev);
+ }
+
+ return err;
+}
- temp = phy_write(phydev, MII_DM9161_INTR, temp);
+static irqreturn_t dm9161_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_DM9161_INTR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return temp;
+ if (!(irq_status & MII_DM9161_INTR_CHANGE))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int dm9161_config_aneg(struct phy_device *phydev)
@@ -132,13 +170,6 @@ static int dm9161_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
}
-static int dm9161_ack_interrupt(struct phy_device *phydev)
-{
- int err = phy_read(phydev, MII_DM9161_INTR);
-
- return (err < 0) ? err : 0;
-}
-
static struct phy_driver dm91xx_driver[] = {
{
.phy_id = 0x0181b880,
@@ -147,8 +178,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8b0,
.name = "Davicom DM9161B/C",
@@ -156,8 +187,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
@@ -165,15 +196,15 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
} };
module_phy_driver(dm91xx_driver);
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6bc7406a1ce7..2f2157e3deab 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1498,7 +1498,7 @@ static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
vsc8584_handle_macsec_interrupt(phydev);
if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
- phy_mac_interrupt(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
@@ -1541,16 +1541,6 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc8584_did_interrupt(struct phy_device *phydev)
-{
- int rc = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-
- return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
-}
-
static int vsc8514_config_pre_init(struct phy_device *phydev)
{
/* These are the settings to override the silicon default
@@ -1933,6 +1923,10 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = vsc85xx_ack_interrupt(phydev);
+ if (rc)
+ return rc;
+
vsc8584_config_macsec_intr(phydev);
vsc8584_config_ts_intr(phydev);
@@ -1943,11 +1937,33 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
if (rc < 0)
return rc;
rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = vsc85xx_ack_interrupt(phydev);
}
return rc;
}
+static irqreturn_t vsc85xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int vsc85xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -2114,7 +2130,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2139,9 +2155,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2163,7 +2178,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8514_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2187,7 +2202,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2211,7 +2226,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2235,7 +2250,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2259,7 +2274,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2283,9 +2298,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2308,9 +2322,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2333,9 +2346,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2359,9 +2370,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2386,9 +2396,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2411,9 +2419,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2436,9 +2442,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 35525a671400..477bdf2f94df 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -493,10 +493,11 @@ EXPORT_SYMBOL(phy_queue_state_machine);
*
* @phydev: the phy_device struct
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
}
+EXPORT_SYMBOL(phy_trigger_machine);
static void phy_abort_cable_test(struct phy_device *phydev)
{
@@ -924,7 +925,7 @@ void phy_stop_machine(struct phy_device *phydev)
* Must not be called from interrupt context, or while the
* phydev->lock is held.
*/
-static void phy_error(struct phy_device *phydev)
+void phy_error(struct phy_device *phydev)
{
WARN_ON(1);
@@ -934,6 +935,7 @@ static void phy_error(struct phy_device *phydev)
phy_trigger_machine(phydev);
}
+EXPORT_SYMBOL(phy_error);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5dab6be6fc38..e13a46c25437 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2463,6 +2463,19 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev)
+{
+ /* It seems there are cases where the interrupts are handled by another
+ * entity (i.e. an IRQ controller embedded inside the PHY) and do not
+ * need any other interaction from phylib. In this case, just trigger
+ * the state machine directly.
+ */
+ phy_trigger_machine(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_handle_interrupt_no_ack);
+
/**
* genphy_read_abilities - read PHY abilities from Clause 22 registers
* @phydev: target phy_device struct
@@ -2815,7 +2828,7 @@ EXPORT_SYMBOL(phy_get_internal_delay);
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
- return phydrv->config_intr && phydrv->ack_interrupt;
+ return phydrv->config_intr && (phydrv->ack_interrupt || phydrv->handle_interrupt);
}
/**
@@ -2947,6 +2960,13 @@ static int phy_remove(struct device *dev)
return 0;
}
+static void phy_shutdown(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ phy_disable_interrupts(phydev);
+}
+
/**
* phy_driver_register - register a phy_driver with the PHY layer
* @new_driver: new phy_driver to register
@@ -2970,6 +2990,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
+ new_driver->mdiodrv.driver.shutdown = phy_shutdown;
new_driver->mdiodrv.driver.owner = owner;
new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fb1db713b7fb..efc6fc637db3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -41,6 +41,12 @@
#define RTL8211E_RX_DELAY BIT(11)
#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL8201F_IER 0x13
#define RTL8366RB_POWER_SAVE 0x15
@@ -102,24 +108,45 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
static int rtl8201_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8201_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = BIT(13) | BIT(12) | BIT(11);
- else
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ if (err)
+ return err;
- return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ err = rtl8201_ack_interrupt(phydev);
+ }
+
+ return err;
}
static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211B_INER_INIT);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -128,11 +155,20 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211E_INER_LINK_STATUS);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -140,13 +176,85 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8211f_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = RTL8211F_INER_LINK_STATUS;
- else
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err)
+ return err;
+
+ err = rtl8211f_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl8201_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, RTL8201F_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8201F_ISR_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl821x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_enabled;
+
+ irq_status = phy_read(phydev, RTL821x_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_enabled = phy_read(phydev, RTL821x_INER);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8211F_INER_LINK_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int rtl8211_config_aneg(struct phy_device *phydev)
@@ -554,8 +662,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
+ .handle_interrupt = rtl8201_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -580,8 +688,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
.suspend = rtl8211b_suspend,
@@ -599,8 +707,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -609,8 +717,8 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
.config_init = &rtl8211e_config_init,
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -619,8 +727,8 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
.config_init = &rtl8211f_config_init,
- .ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -660,6 +768,46 @@ static struct phy_driver realtek_drvs[] = {
.read_mmd = rtl822x_read_mmd,
.write_mmd = rtl822x_write_mmd,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc838),
+ .name = "RTL8226-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc848),
+ .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc849),
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc84a),
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
@@ -668,8 +816,8 @@ static struct phy_driver realtek_drvs[] = {
* irq is requested and ACKed by reading the status register,
* which is done by the irqchip code.
*/
- .ack_interrupt = genphy_no_ack_interrupt,
.config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 07f1f3933927..b4092127a92c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -975,11 +975,11 @@ static void team_port_disable(struct team *team,
}
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
static void __team_compute_features(struct team *team)
{
@@ -1009,8 +1009,7 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
team->dev->hard_header_len = max_hard_header_len;
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -2175,7 +2174,7 @@ static void team_setup(struct net_device *dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
dev->features |= dev->hw_features;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 99fd12be2111..99381e6bea78 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_USB_LAN78XX) += lan78xx.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r8153_ecm.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 65b315bc60ab..bf243edeb064 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -822,20 +822,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
@@ -845,18 +844,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
@@ -864,7 +863,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
} while (buf & OTP_STATUS_BUSY_);
- ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
data[i] = (u8)(buf & 0xFF);
}
@@ -876,20 +875,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
@@ -899,21 +897,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
}
/* set to BYTE program mode */
- ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
@@ -1038,7 +1036,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
int i;
- int ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
@@ -1047,14 +1044,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
for (i = 1; i < NUM_OF_MAF; i++) {
- ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
- ret = lan78xx_write_reg(dev, MAF_LO(i),
+ lan78xx_write_reg(dev, MAF_HI(i), 0);
+ lan78xx_write_reg(dev, MAF_LO(i),
pdata->pfilter_table[i][1]);
- ret = lan78xx_write_reg(dev, MAF_HI(i),
+ lan78xx_write_reg(dev, MAF_HI(i),
pdata->pfilter_table[i][0]);
}
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1124,7 +1121,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
u32 flow = 0, fct_flow = 0;
- int ret;
u8 cap;
if (dev->fc_autoneg)
@@ -1147,10 +1143,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
/* threshold value should be set before enabling flow */
- ret = lan78xx_write_reg(dev, FLOW, flow);
+ lan78xx_write_reg(dev, FLOW, flow);
return 0;
}
@@ -1663,11 +1659,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
- int ret;
u8 addr[6];
- ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1700,12 +1695,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
}
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
ether_addr_copy(dev->net->dev_addr, addr);
}
@@ -1838,7 +1833,7 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int ret, temp;
+ int temp;
/* At forced 100 F/H mode, chip may fail to set mode correctly
* when cable is switched between long(~50+m) and short one.
@@ -1849,7 +1844,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* disable phy interrupt */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
temp = phy_read(phydev, MII_BMCR);
temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
@@ -1863,7 +1858,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* enable phy interrupt back */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
}
}
@@ -1917,14 +1912,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
- int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are only two callbacks executed in non-atomic contex.
*/
- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
if (buf != data->irqenable)
- ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
mutex_unlock(&data->irq_lock);
}
@@ -1991,7 +1985,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
static int lan8835_fixup(struct phy_device *phydev)
{
int buf;
- int ret;
struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
@@ -2001,11 +1994,11 @@ static int lan8835_fixup(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
/* RGMII MAC TXC Delay Enable */
- ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
+ lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
/* RGMII TX DLL Tune Adjust */
- ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
@@ -2189,28 +2182,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- int ret = 0;
u32 buf;
bool rxenabled;
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
return 0;
@@ -2267,13 +2259,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
int ll_mtu = new_mtu + netdev->hard_header_len;
int old_hard_mtu = dev->hard_mtu;
int old_rx_urb_size = dev->rx_urb_size;
- int ret;
/* no second zero-length packet read wanted after mtu-sized packets */
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
+ lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2296,7 +2287,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
- int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2313,12 +2303,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Added to support MAC address changes */
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
return 0;
}
@@ -2330,7 +2320,6 @@ static int lan78xx_set_features(struct net_device *netdev,
struct lan78xx_net *dev = netdev_priv(netdev);
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
@@ -2354,7 +2343,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
return 0;
}
@@ -3804,7 +3793,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
u32 buf;
- int ret;
int mask_index;
u16 crc;
u32 temp_wucsr;
@@ -3813,26 +3801,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ lan78xx_read_reg(dev, MAC_TX, &buf);
buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_write_reg(dev, MAC_TX, buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
- ret = lan78xx_write_reg(dev, WUCSR, 0);
- ret = lan78xx_write_reg(dev, WUCSR2, 0);
- ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ lan78xx_write_reg(dev, WUCSR, 0);
+ lan78xx_write_reg(dev, WUCSR2, 0);
+ lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
temp_wucsr = 0;
temp_pmt_ctl = 0;
- ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
mask_index = 0;
if (wol & WAKE_PHY) {
@@ -3861,30 +3849,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
/* for IPv6 Multicast */
crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3905,16 +3893,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
* for packettype (offset 12,13) = ARP (0x0806)
*/
crc = lan78xx_wakeframe_crc16(arp_type, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_ALL_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3922,7 +3910,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+ lan78xx_write_reg(dev, WUCSR, temp_wucsr);
/* when multiple WOL bits are set */
if (hweight_long((unsigned long)wol) > 1) {
@@ -3930,16 +3918,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+ lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear WUPS */
- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ lan78xx_read_reg(dev, PMT_CTL, &buf);
buf |= PMT_CTL_WUPS_MASK_;
- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ lan78xx_write_reg(dev, PMT_CTL, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
return 0;
}
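A note on the wakeup-filter constants in lan78xx_set_suspend() above: the WUF_MASKn values read as per-byte enable bitmaps, where bit N selects byte N of the incoming frame for the CRC-16 match programmed into WUF_CFG. That reading is inferred from the constants themselves, not from a datasheet: 7 presumably covers the three-byte IPv4 multicast MAC prefix (01:00:5e), 3 the two-byte 33:33 IPv6 prefix, and 0x3000 the Ethertype bytes at offsets 12-13 matched against 08 06 (ARP). A small standalone C illustration of that interpretation:

#include <stdio.h>
#include <stdint.h>

/* Bit-per-byte reading of WUF_MASK0, inferred from the driver code above. */
static void show_mask(const char *what, uint32_t mask)
{
	unsigned int byte;

	printf("%-22s mask 0x%04x -> frame bytes:", what, mask);
	for (byte = 0; byte < 32; byte++)
		if (mask & (1u << byte))
			printf(" %u", byte);
	printf("\n");
}

int main(void)
{
	show_mask("IPv4 multicast prefix", 0x7);	/* bytes 0-2 */
	show_mask("IPv6 multicast prefix", 0x3);	/* bytes 0-1 */
	show_mask("ARP Ethertype match", 0x3000);	/* bytes 12-13 */
	return 0;
}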
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b1770489aca5..c448d6089821 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/firmware.h>
#include <crypto/hash.h>
+#include <linux/usb/r8152.h>
/* Information for net-next */
#define NETNEXT_VERSION "11"
@@ -653,18 +654,6 @@ enum rtl_register_content {
#define INTR_LINK 0x0004
-#define RTL8152_REQT_READ 0xc0
-#define RTL8152_REQT_WRITE 0x40
-#define RTL8152_REQ_GET_REGS 0x05
-#define RTL8152_REQ_SET_REGS 0x05
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-#define BYTE_EN_SIX_BYTES 0x3f
-#define BYTE_EN_START_MASK 0x0f
-#define BYTE_EN_END_MASK 0xf0
-
#define RTL8153_MAX_PACKET 9216 /* 9K */
#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
ETH_FCS_LEN)
@@ -689,21 +678,9 @@ enum rtl8152_flags {
LENOVO_MACPASSTHRU,
};
-/* Define these values to match your device */
-#define VENDOR_ID_REALTEK 0x0bda
-#define VENDOR_ID_MICROSOFT 0x045e
-#define VENDOR_ID_SAMSUNG 0x04e8
-#define VENDOR_ID_LENOVO 0x17ef
-#define VENDOR_ID_LINKSYS 0x13b1
-#define VENDOR_ID_NVIDIA 0x0955
-#define VENDOR_ID_TPLINK 0x2357
-
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
-#define MCU_TYPE_PLA 0x0100
-#define MCU_TYPE_USB 0x0000
-
struct tally_counter {
__le64 tx_packets;
__le64 rx_packets;
@@ -898,6 +875,7 @@ struct fw_header {
* struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
* The layout of the firmware block is:
* <struct fw_mac> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_mac + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -911,6 +889,7 @@ struct fw_header {
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
* @bp: break points. Depends on firmware.
+ * @reserved: reserved space (unused)
* @fw_ver_reg: the register to store the fw version.
* @fw_ver_data: the firmware version of the current type.
* @info: additional information for debugging, and is followed by the
@@ -936,8 +915,10 @@ struct fw_mac {
/**
* struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
* This is used to set patch key when loading the firmware of PHY.
+ * @blk_hdr: firmware descriptor (type, length)
* @key_reg: the register to write the patch key.
* @key_data: patch key.
+ * @reserved: reserved space (unused)
*/
struct fw_phy_patch_key {
struct fw_block blk_hdr;
@@ -950,6 +931,7 @@ struct fw_phy_patch_key {
* struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
* The layout of the firmware block is:
* <struct fw_phy_nc> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_phy_nc + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -958,8 +940,9 @@ struct fw_phy_patch_key {
* @patch_en_addr: the register of enabling patch mode. Depends on chip.
* @patch_en_value: patch mode enabled mask. Depends on the firmware.
* @mode_reg: the register for switching the mode.
- * @mod_pre: the mode needing to be set before loading the firmware.
- * @mod_post: the mode to be set when finishing to load the firmware.
+ * @mode_pre: the mode needing to be set before loading the firmware.
+ * @mode_post: the mode to be set when finishing to load the firmware.
+ * @reserved: reserved space (unused)
* @bp_start: the start register of break points. Depends on chip.
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
@@ -6615,7 +6598,7 @@ static int rtl_fw_init(struct r8152 *tp)
return 0;
}
-static u8 rtl_get_version(struct usb_interface *intf)
+u8 rtl8152_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
u32 ocp_data = 0;
@@ -6673,12 +6656,13 @@ static u8 rtl_get_version(struct usb_interface *intf)
return version;
}
+EXPORT_SYMBOL_GPL(rtl8152_get_version);
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- u8 version = rtl_get_version(intf);
+ u8 version = rtl8152_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
int ret;
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
new file mode 100644
index 000000000000..2c3fabd38b16
--- /dev/null
+++ b/drivers/net/usb/r8153_ecm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/r8152.h>
+
+#define OCP_BASE 0xe86c
+
+static int pla_read_word(struct usbnet *dev, u16 index)
+{
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ if (shift)
+ byen <<= shift;
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+ if (ret < 0)
+ goto out;
+
+ ret = __le32_to_cpu(tmp);
+ ret >>= (shift * 8);
+ ret &= 0xffff;
+
+out:
+ return ret;
+}
+
+static int pla_write_word(struct usbnet *dev, u16 index, u32 data)
+{
+ u32 mask = 0xffff;
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ data &= mask;
+
+ if (shift) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ }
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+ if (ret < 0)
+ goto out;
+
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
+
+ ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+out:
+ return ret;
+}
+
+static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ goto out;
+
+ ret = pla_read_word(dev, 0xb400 + reg * 2);
+
+out:
+ return ret;
+}
+
+static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ return;
+
+ ret = pla_write_word(dev, 0xb400 + reg * 2, val);
+}
+
+static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+
+ status = usbnet_cdc_bind(dev, intf);
+ if (status < 0)
+ return status;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = r8153_ecm_mdio_read;
+ dev->mii.mdio_write = r8153_ecm_mdio_write;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.supports_gmii = 1;
+
+ return status;
+}
+
+static const struct driver_info r8153_info = {
+ .description = "RTL8153 ECM Device",
+ .flags = FLAG_ETHER,
+ .bind = r8153_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .manage_power = usbnet_manage_power,
+};
+
+static const struct usb_device_id products[] = {
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
+ { }, /* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static int rtl8153_ecm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+ if (rtl8152_get_version(intf))
+ return -ENODEV;
+#endif
+
+ return usbnet_probe(intf, id);
+}
+
+static struct usb_driver r8153_ecm_driver = {
+ .name = "r8153_ecm",
+ .id_table = products,
+ .probe = rtl8153_ecm_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(r8153_ecm_driver);
+
+MODULE_AUTHOR("Hayes Wang");
+MODULE_DESCRIPTION("Realtek USB ECM device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1a557aeba32b..876679af6f7c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -66,6 +66,7 @@ struct vxlan_net {
struct list_head vxlan_list;
struct hlist_head sock_list[PORT_HASH_SIZE];
spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
};
/* Forwarding table entry */
@@ -4693,10 +4694,6 @@ static int vxlan_nexthop_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = {
- .notifier_call = vxlan_nexthop_event,
-};
-
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -4704,11 +4701,12 @@ static __net_init int vxlan_init_net(struct net *net)
INIT_LIST_HEAD(&vn->vxlan_list);
spin_lock_init(&vn->sock_lock);
+ vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vn->sock_list[h]);
- return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ return register_nexthop_notifier(net, &vn->nexthop_notifier_block);
}
static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
@@ -4740,8 +4738,11 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ }
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
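Background on the vxlan change above: register_nexthop_notifier() registers on a per-namespace chain, and a notifier_block can only be linked into one chain at a time, so a single static block shared by every namespace cannot work once more than one netns registers it. Embedding the block in struct vxlan_net gives each namespace its own node. A generic sketch of that pernet pattern (all demo_* names are invented; registering the pernet_operations that size and allocate this per-netns area is omitted for brevity):

#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/nexthop.h>

static unsigned int demo_net_id;

struct demo_net {
	struct notifier_block nexthop_nb;
};

static int demo_nexthop_event(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	return NOTIFY_DONE;
}

static int __net_init demo_init_net(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	/* One notifier_block per namespace: a block must never sit on
	 * two notifier chains at once.
	 */
	dn->nexthop_nb.notifier_call = demo_nexthop_event;
	return register_nexthop_notifier(net, &dn->nexthop_nb);
}

static void __net_exit demo_exit_net(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	unregister_nexthop_notifier(net, &dn->nexthop_nb);
}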
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 409e5a7ad8e2..0720f5f92caa 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -871,6 +871,45 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
return 0;
}
+static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
+{
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x00 &&
+ skb->data[2] == 0x00) {
+ if (!pvc->main)
+ return -1;
+ skb->dev = pvc->main;
+ skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
+ skb_pull(skb, 5);
+ skb_reset_mac_header(skb);
+ return 0;
+
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ } else if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x80 &&
+ skb->data[2] == 0xC2) {
+ /* PID 00-07 stands for Ethernet frames without FCS */
+ if (skb->data[3] == 0x00 &&
+ skb->data[4] == 0x07) {
+ if (!pvc->ether)
+ return -1;
+ skb_pull(skb, 5);
+ if (skb->len < ETH_HLEN)
+ return -1;
+ skb->protocol = eth_type_trans(skb, pvc->ether);
+ return 0;
+
+ /* PID unsupported */
+ } else {
+ return -1;
+ }
+
+ /* OUI unsupported */
+ } else {
+ return -1;
+ }
+}
static int fr_rx(struct sk_buff *skb)
{
@@ -880,9 +919,9 @@ static int fr_rx(struct sk_buff *skb)
u8 *data = skb->data;
u16 dlci;
struct pvc_device *pvc;
- struct net_device *dev = NULL;
+ struct net_device *dev;
- if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+ if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
goto rx_error;
dlci = q922_to_dlci(skb->data);
@@ -904,8 +943,7 @@ static int fr_rx(struct sk_buff *skb)
netdev_info(frad, "No PVC for received frame's DLCI %d\n",
dlci);
#endif
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
if (pvc->state.fecn != fh->fecn) {
@@ -931,63 +969,51 @@ static int fr_rx(struct sk_buff *skb)
}
if (data[3] == NLPID_IP) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
} else if (data[3] == NLPID_IPV6) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IPV6);
+ skb_reset_mac_header(skb);
- } else if (skb->len > 10 && data[3] == FR_PAD &&
- data[4] == NLPID_SNAP && data[5] == FR_PAD) {
- u16 oui = ntohs(*(__be16*)(data + 6));
- u16 pid = ntohs(*(__be16*)(data + 8));
- skb_pull(skb, 10);
-
- switch ((((u32)oui) << 16) | pid) {
- case ETH_P_ARP: /* routed frame with SNAP */
- case ETH_P_IPX:
- case ETH_P_IP: /* a long variant */
- case ETH_P_IPV6:
- dev = pvc->main;
- skb->protocol = htons(pid);
- break;
-
- case 0x80C20007: /* bridged Ethernet frame */
- if ((dev = pvc->ether) != NULL)
- skb->protocol = eth_type_trans(skb, dev);
- break;
-
- default:
- netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
- oui, pid);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ } else if (data[3] == FR_PAD) {
+ if (skb->len < 5)
+ goto rx_error;
+ if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
+ skb_pull(skb, 5);
+ if (skb->len < 5) /* Incomplete SNAP header */
+ goto rx_error;
+ if (fr_snap_parse(skb, pvc))
+ goto rx_drop;
+ } else {
+ goto rx_drop;
}
+
} else {
netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
data[3], skb->len);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
- if (dev) {
- dev->stats.rx_packets++; /* PVC traffic */
- dev->stats.rx_bytes += skb->len;
- if (pvc->state.becn)
- dev->stats.rx_compressed++;
- skb->dev = dev;
- netif_rx(skb);
- return NET_RX_SUCCESS;
- } else {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ dev = skb->dev;
+ dev->stats.rx_packets++; /* PVC traffic */
+ dev->stats.rx_bytes += skb->len;
+ if (pvc->state.becn)
+ dev->stats.rx_compressed++;
+ netif_rx(skb);
+ return NET_RX_SUCCESS;
- rx_error:
+rx_error:
frad->stats.rx_errors++; /* Mark error */
+rx_drop:
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
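For readers without RFC 2427 at hand, the framing that fr_rx() and the new fr_snap_parse() walk is roughly the layout sketched below; the byte values are the conventional NLPID constants quoted from memory, not copied from hdlc_fr.c. Routed IP/IPv6 uses only the first four bytes (NLPID 0xcc or 0x8e) with the datagram following directly; the SNAP form adds a pad byte, the SNAP NLPID, a three-byte OUI and a two-byte PID, which accounts for the two 5-byte pulls done before and inside fr_snap_parse().

/* Hypothetical view of an RFC 2427 SNAP-encapsulated frame as seen at
 * the start of fr_rx(); offsets match the skb_pull() calls above.
 */
struct fr_snap_frame {
	unsigned char q922[2];	/* DLCI address, ea1 = 0, ea2 = 1           */
	unsigned char ui;	/* 0x03 (FR_UI)                             */
	unsigned char pad;	/* 0x00 (FR_PAD)                            */
	unsigned char nlpid;	/* 0x80 (NLPID_SNAP)                        */
	unsigned char oui[3];	/* 00-00-00: Ethertype; 00-80-C2: 802.1     */
	unsigned char pid[2];	/* with 00-80-C2, 00-07 = Ethernet, no FCS  */
	/* payload follows */
};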
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 9888a7061873..101def7dc73d 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1159,7 +1159,7 @@ static u32 fq_to_tag(struct qman_fq *fq)
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit);
+ unsigned int poll_limit, bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
@@ -1174,7 +1174,7 @@ static irqreturn_t portal_isr(int irq, void *ptr)
/* DQRR-handling if it's interrupt-driven */
if (is & QM_PIRQ_DQRI) {
- __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
}
/* Handling of anything else that's interrupt-driven */
@@ -1602,7 +1602,7 @@ static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
* user callbacks to call into any QMan API.
*/
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit)
+ unsigned int poll_limit, bool sched_napi)
{
const struct qm_dqrr_entry *dq;
struct qman_fq *fq;
@@ -1636,7 +1636,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
* and we don't want multiple if()s in the critical
* path (SDQCR).
*/
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
if (res == qman_cb_dqrr_stop)
break;
/* Check for VDQCR completion */
@@ -1646,7 +1646,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
/* SDQCR: context_b points to the FQ */
fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
/*
* The callback can request that we exit without
* consuming this entry nor advancing;
@@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(qman_start_using_portal);
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
- return __poll_portal_fast(p, limit);
+ return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
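The extra sched_napi flag tells a DQRR callback whether it is running from the portal interrupt (portal_isr passes true) or from qman_p_poll_dqrr() (false), so a driver can decide whether to kick NAPI or to consume frames in place. A hedged sketch of a consumer-side callback; only the callback signature and the qman_cb_dqrr_* return codes come from the patched API, the demo_* names are invented:

#include <linux/netdevice.h>
#include <soc/fsl/qman.h>

struct demo_rx_queue {
	struct qman_fq fq;		/* embedded frame queue */
	struct napi_struct napi;
};

static enum qman_cb_dqrr_result demo_rx_dqrr(struct qman_portal *portal,
					     struct qman_fq *fq,
					     const struct qm_dqrr_entry *dq,
					     bool sched_napi)
{
	struct demo_rx_queue *rxq = container_of(fq, struct demo_rx_queue, fq);

	if (sched_napi) {
		/* Running from the portal interrupt: punt to NAPI. */
		napi_schedule(&rxq->napi);
		return qman_cb_dqrr_stop;
	}

	/* Running from qman_p_poll_dqrr() inside the NAPI poll loop:
	 * process dq->fd here and let QMan consume the entry.
	 */
	return qman_cb_dqrr_consume;
}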
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 7066b2f1467c..28fbddc3c204 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -45,7 +45,8 @@
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
struct qman_fq *,
- const struct qm_dqrr_entry *);
+ const struct qm_dqrr_entry *,
+ bool sched_napi);
static void cb_ern(struct qman_portal *, struct qman_fq *,
const union qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
@@ -208,7 +209,8 @@ failed:
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index e87b65403b67..b7e8e5ec884c 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -275,7 +275,8 @@ static inline int process_frame_data(struct hp_handler *handler,
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
@@ -293,7 +294,8 @@ skip:
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index d4841e5a5f45..b9bb7dff32e2 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -737,4 +737,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+
#endif /* _MHI_H_ */
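mhi_queue_is_full() gives client drivers, such as the new mhi_net driver in this series, a way to apply backpressure before mhi_queue_skb() starts failing. A rough sketch of how a network driver's xmit path might use it; the demo_* names are placeholders, not the mhi_net implementation:

#include <linux/dma-direction.h>
#include <linux/mhi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_mhi_priv {
	struct mhi_device *mdev;
};

static netdev_tx_t demo_mhi_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct demo_mhi_priv *priv = netdev_priv(ndev);
	int err;

	err = mhi_queue_skb(priv->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		/* Ring full or channel down: drop and account for it. */
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Stop the queue before the ring actually overflows, so the
	 * error path above stays cold.
	 */
	if (mhi_queue_is_full(priv->mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}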
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 0b17c4322b09..934de56644e7 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -207,8 +207,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
- NETIF_F_GSO_SCTP)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
* If one device supports one of these features, then enable them
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ab192720e2d6..46d9a0c26c67 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -198,6 +198,9 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* The max revision number supported by any set type + 1 */
+#define IPSET_REVISION_MAX 9
+
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@@ -215,6 +218,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
+ /* Revision-specific supported (create) flags */
+ u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index eb3cb1a98b45..4f158d6352ae 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1510,6 +1510,7 @@ int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_loopback(struct phy_device *phydev, bool enable);
int genphy_soft_reset(struct phy_device *phydev);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
static inline int genphy_config_aneg(struct phy_device *phydev)
{
@@ -1570,8 +1571,10 @@ void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
+void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
+void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
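phy_error() and phy_trigger_machine() are exported here because the PHY drivers touched in this series move from the old ack_interrupt/did_interrupt pair to a single handle_interrupt() callback. A hedged sketch of what such a handler typically looks like; the register number and bit are invented for illustration:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/phy.h>

#define DEMO_INTR_STATUS	0x1a	/* invented latched-status register */
#define DEMO_INTR_LINK		BIT(1)	/* invented link-change bit */

static irqreturn_t demo_handle_interrupt(struct phy_device *phydev)
{
	int irq_status;

	/* On many PHYs reading the status register also acks the interrupt. */
	irq_status = phy_read(phydev, DEMO_INTR_STATUS);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	if (!(irq_status & DEMO_INTR_LINK))
		return IRQ_NONE;

	/* Hand the event to the phylib state machine. */
	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}

For PHYs whose interrupt needs no explicit acknowledgement, the newly exported genphy_handle_interrupt_no_ack() can presumably be plugged in directly instead of a driver-specific handler.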
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
new file mode 100644
index 000000000000..388846766bb2
--- /dev/null
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * Hirschmann Hellcreek TSN switch platform data.
+ *
+ * Copyright (C) 2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HIRSCHMANN_HELLCREEK_H_
+#define _HIRSCHMANN_HELLCREEK_H_
+
+#include <linux/types.h>
+
+struct hellcreek_platform_data {
+ int num_ports; /* Amount of switch ports */
+ int is_100_mbits; /* Is it configured to 100 or 1000 mbit/s */
+ int qbv_support; /* Qbv support on front TSN ports */
+ int qbv_on_cpu_port; /* Qbv support on the CPU port */
+ int qbu_support; /* Qbu support on front TSN ports */
+ u16 module_id; /* Module identification */
+};
+
+#endif /* _HIRSCHMANN_HELLCREEK_H_ */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
new file mode 100644
index 000000000000..20d88b1defc3
--- /dev/null
+++ b/include/linux/usb/r8152.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Realtek Semiconductor Corp. All rights reserved.
+ */
+
+#ifndef __LINUX_R8152_H
+#define __LINUX_R8152_H
+
+#define RTL8152_REQT_READ 0xc0
+#define RTL8152_REQT_WRITE 0x40
+#define RTL8152_REQ_GET_REGS 0x05
+#define RTL8152_REQ_SET_REGS 0x05
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+#define BYTE_EN_SIX_BYTES 0x3f
+#define BYTE_EN_START_MASK 0x0f
+#define BYTE_EN_END_MASK 0xf0
+
+#define MCU_TYPE_PLA 0x0100
+#define MCU_TYPE_USB 0x0000
+
+/* Define these values to match your device */
+#define VENDOR_ID_REALTEK 0x0bda
+#define VENDOR_ID_MICROSOFT 0x045e
+#define VENDOR_ID_SAMSUNG 0x04e8
+#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
+#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
+
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+extern u8 rtl8152_get_version(struct usb_interface *intf);
+#endif
+
+#endif /* __LINUX_R8152_H */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 35429a140dfa..4e60d2610f20 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -45,6 +45,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
#define DSA_TAG_PROTO_RTL4_A_VALUE 17
+#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -65,6 +66,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
+ DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
};
struct packet_type;
@@ -535,6 +537,12 @@ struct dsa_switch_ops {
struct ethtool_regs *regs, void *p);
/*
+ * Upper device tracking.
+ */
+ int (*port_prechangeupper)(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info);
+
+ /*
* Bridge integration
*/
int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
diff --git a/include/net/dst.h b/include/net/dst.h
index 8ea8812b0b41..10f0a8399867 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -400,14 +400,12 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
struct sk_buff *skb)
{
- struct neighbour *n = NULL;
+ struct neighbour *n;
- /* The packets from tunnel devices (eg bareudp) may have only
- * metadata in the dst pointer of skb. Hence a pointer check of
- * neigh_lookup is needed.
- */
- if (dst->ops->neigh_lookup)
- n = dst->ops->neigh_lookup(dst, skb, NULL);
+ if (WARN_ON_ONCE(!dst->ops->neigh_lookup))
+ return NULL;
+
+ n = dst->ops->neigh_lookup(dst, skb, NULL);
return IS_ERR(n) ? NULL : n;
}
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 753ba7e755d6..6e706d838e4e 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -29,7 +29,8 @@ struct mptcp_ext {
use_ack:1,
ack64:1,
mpc_map:1,
- __unused:2;
+ frozen:1,
+ __unused:1;
/* one byte hole */
};
@@ -106,6 +107,19 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
from->active_extensions = 0;
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ struct mptcp_ext *from_ext;
+
+ from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
+ if (!from_ext)
+ return;
+
+ from_ext->frozen = 1;
+ skb_ext_copy(to, from);
+}
+
static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
const struct mptcp_ext *from_ext)
{
@@ -193,6 +207,11 @@ static inline void mptcp_skb_ext_move(struct sk_buff *to,
{
}
+static inline void mptcp_skb_ext_copy(struct sk_buff *to,
+ struct sk_buff *from)
+{
+}
+
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 40e0e0623f46..0d8ff84a2588 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -18,4 +18,14 @@ struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
const struct tcphdr *oth);
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+
+
#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 4a3ef9ebdf6f..edcf6d1cd316 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -20,4 +20,13 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
const struct sk_buff *oldskb,
const struct tcphdr *oth, unsigned int otcplen);
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook);
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code);
+
#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 80f71499b543..1aa585216f34 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1121,6 +1121,12 @@ static inline void sctp_outq_cork(struct sctp_outq *q)
* sctp_input_cb is currently used on rx and sock rx queue
*/
struct sctp_input_cb {
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
struct sctp_chunk *chunk;
struct sctp_af *af;
__be16 encap_port;
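The reason sctp_input_cb needs to start with this union: the IPv4/IPv6 layers reach their own per-packet state through casts of skb->cb, so any protocol that keeps private data there must reserve the inet(6)_skb_parm region or it will be clobbered. The demo macros below mirror the shape of the IPCB()/IP6CB() helpers that exist in <net/ip.h> and <linux/ipv6.h>, shown here only to make the aliasing explicit:

#include <linux/ipv6.h>		/* struct inet6_skb_parm */
#include <linux/skbuff.h>
#include <net/ip.h>		/* struct inet_skb_parm */

/* Both casts address the very start of skb->cb, which is why
 * sctp_input_cb now reserves that space up front.
 */
#define DEMO_IPCB(skb)  ((struct inet_skb_parm *)((skb)->cb))
#define DEMO_IP6CB(skb) ((struct inet6_skb_parm *)((skb)->cb))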
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 9f484113cfda..59eeba31c192 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -689,7 +689,8 @@ enum qman_cb_dqrr_result {
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr);
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi);
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 11a72a938eb1..6397d75899bc 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -92,11 +92,11 @@ enum {
/* Reserve empty slots */
IPSET_ATTR_CADT_MAX = 16,
/* Create-only specific attributes */
- IPSET_ATTR_GC,
+ IPSET_ATTR_INITVAL, /* was unused IPSET_ATTR_GC */
IPSET_ATTR_HASHSIZE,
IPSET_ATTR_MAXELEM,
IPSET_ATTR_NETMASK,
- IPSET_ATTR_PROBES,
+ IPSET_ATTR_BUCKETSIZE, /* was unused IPSET_ATTR_PROBES */
IPSET_ATTR_RESIZE,
IPSET_ATTR_SIZE,
/* Kernel-only */
@@ -214,6 +214,8 @@ enum ipset_cadt_flags {
enum ipset_create_flags {
IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
IPSET_CREATE_FLAG_FORCEADD = (1 << IPSET_CREATE_FLAG_BIT_FORCEADD),
+ IPSET_CREATE_FLAG_BIT_BUCKETSIZE = 1,
+ IPSET_CREATE_FLAG_BUCKETSIZE = (1 << IPSET_CREATE_FLAG_BIT_BUCKETSIZE),
IPSET_CREATE_FLAG_BIT_MAX = 7,
};
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd408f6a5d2..d1325ffb0060 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -770,8 +770,12 @@ enum {
* actions in a dump. All dump responses will contain the number of actions
* being dumped stored in for user app's consumption in TCA_ROOT_COUNT
*
+ * TCA_FLAG_TERSE_DUMP user->kernel to request terse (brief) dump that only
+ * includes essential action info (kind, index, etc.)
+ *
*/
#define TCA_FLAG_LARGE_DUMP_ON (1 << 0)
+#define TCA_FLAG_TERSE_DUMP (1 << 1)
/* New extended info filters for IFLA_EXT_MASK */
#define RTEXT_FILTER_VF (1 << 0)
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 5040fe43f4b4..e4d287afc2c9 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -17,7 +17,7 @@ config NFT_BRIDGE_META
config NFT_BRIDGE_REJECT
tristate "Netfilter nf_tables bridge reject support"
- depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
+ depends on NFT_REJECT
help
Add support to reject packets.
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index deae2c9a0f69..eba0efe64d05 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -39,30 +39,6 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
}
}
-static int nft_bridge_iphdr_validate(struct sk_buff *skb)
-{
- struct iphdr *iph;
- u32 len;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return 0;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- return 0;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len)
- return 0;
- else if (len < (iph->ihl*4))
- return 0;
-
- if (!pskb_may_pull(skb, iph->ihl*4))
- return 0;
-
- return 1;
-}
-
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
* or the bridge port (NF_BRIDGE PREROUTING).
*/
@@ -72,29 +48,11 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net,
int hook)
{
struct sk_buff *nskb;
- struct iphdr *niph;
- const struct tcphdr *oth;
- struct tcphdr _oth;
- if (!nft_bridge_iphdr_validate(oldskb))
- return;
-
- oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
- if (!oth)
- return;
-
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv4.sysctl_ip_default_ttl);
- nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@@ -106,139 +64,32 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
- struct iphdr *niph;
- struct icmphdr *icmph;
- unsigned int len;
- __wsum csum;
- u8 proto;
-
- if (!nft_bridge_iphdr_validate(oldskb))
- return;
-
- /* IP header checks: fragment. */
- if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
- return;
-
- /* RFC says return as much as we can without exceeding 576 bytes. */
- len = min_t(unsigned int, 536, oldskb->len);
-
- if (!pskb_may_pull(oldskb, len))
- return;
-
- if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
- return;
-
- proto = ip_hdr(oldskb)->protocol;
-
- if (!skb_csum_unnecessary(oldskb) &&
- nf_reject_verify_csum(proto) &&
- nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
- return;
- nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
- LL_MAX_HEADER + len, GFP_ATOMIC);
+ nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
- net->ipv4.sysctl_ip_default_ttl);
-
- skb_reset_transport_header(nskb);
- icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
- icmph->type = ICMP_DEST_UNREACH;
- icmph->code = code;
-
- skb_put_data(nskb, skb_network_header(oldskb), len);
-
- csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
- icmph->checksum = csum_fold(csum);
-
- niph->tot_len = htons(nskb->len);
- ip_send_check(niph);
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
-static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
-{
- struct ipv6hdr *hdr;
- u32 pkt_len;
-
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
- return 0;
-
- hdr = ipv6_hdr(skb);
- if (hdr->version != 6)
- return 0;
-
- pkt_len = ntohs(hdr->payload_len);
- if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
- return 0;
-
- return 1;
-}
-
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
int hook)
{
struct sk_buff *nskb;
- const struct tcphdr *oth;
- struct tcphdr _oth;
- unsigned int otcplen;
- struct ipv6hdr *nip6h;
- if (!nft_bridge_ip6hdr_validate(oldskb))
- return;
-
- oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
- if (!oth)
- return;
-
- nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
- LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
- nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
- nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
}
-static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
-{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- int thoff;
- __be16 fo;
- u8 proto = ip6h->nexthdr;
-
- if (skb_csum_unnecessary(skb))
- return true;
-
- if (ip6h->payload_len &&
- pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
- return false;
-
- ip6h = ipv6_hdr(skb);
- thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
- if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
- return false;
-
- if (!nf_reject_verify_csum(proto))
- return true;
-
- return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
-}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb,
@@ -246,49 +97,11 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
int hook, u8 code)
{
struct sk_buff *nskb;
- struct ipv6hdr *nip6h;
- struct icmp6hdr *icmp6h;
- unsigned int len;
-
- if (!nft_bridge_ip6hdr_validate(oldskb))
- return;
- /* Include "As much of invoking packet as possible without the ICMPv6
- * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
- */
- len = min_t(unsigned int, 1220, oldskb->len);
-
- if (!pskb_may_pull(oldskb, len))
- return;
-
- if (!reject6_br_csum_ok(oldskb, hook))
- return;
-
- nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
- LL_MAX_HEADER + len, GFP_ATOMIC);
+ nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
if (!nskb)
return;
- skb_reserve(nskb, LL_MAX_HEADER);
- nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
-
- skb_reset_transport_header(nskb);
- icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
- icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
- icmp6h->icmp6_code = code;
-
- skb_put_data(nskb, skb_network_header(oldskb), len);
- nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
-
- icmp6h->icmp6_cksum =
- csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
- nskb->len - sizeof(struct ipv6hdr),
- IPPROTO_ICMPV6,
- csum_partial(icmp6h,
- nskb->len - sizeof(struct ipv6hdr),
- 0));
-
nft_reject_br_push_etherhdr(oldskb, nskb);
br_forward(br_port_get_rcu(dev), nskb, false, true);
@@ -364,69 +177,13 @@ static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
(1 << NF_BR_LOCAL_IN));
}
-static int nft_reject_bridge_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_reject *priv = nft_expr_priv(expr);
- int icmp_code;
-
- if (tb[NFTA_REJECT_TYPE] == NULL)
- return -EINVAL;
-
- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
- return -EINVAL;
-
- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
- icmp_code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
-
- priv->icmp_code = icmp_code;
- break;
- case NFT_REJECT_TCP_RST:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int nft_reject_bridge_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_reject *priv = nft_expr_priv(expr);
-
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
- goto nla_put_failure;
-
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
- goto nla_put_failure;
- break;
- default:
- break;
- }
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
- .init = nft_reject_bridge_init,
- .dump = nft_reject_bridge_dump,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
.validate = nft_reject_bridge_validate,
};
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 205e92e604ef..db8a0ff86f36 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -230,7 +230,7 @@ static int dev_do_ioctl(struct net_device *dev,
struct ifreq *ifr, unsigned int cmd)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int err = -EOPNOTSUPP;
+ int err;
err = dsa_ndo_do_ioctl(dev, ifr, cmd);
if (err == 0 || err != -EOPNOTSUPP)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 1f9b9b11008c..d975614f7dd6 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -56,6 +56,12 @@ config NET_DSA_TAG_BRCM_PREPEND
Broadcom switches which places the tag before the Ethernet header
(prepended).
+config NET_DSA_TAG_HELLCREEK
+ tristate "Tag driver for Hirschmann Hellcreek TSN switches"
+ help
+ Say Y or M if you want to enable support for tagging frames
+ for the Hirschmann Hellcreek TSN switches.
+
config NET_DSA_TAG_GSWIP
tristate "Tag driver for Lantiq / Intel GSWIP switches"
help
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 4f47b2025ff5..e25d5457964a 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
+obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index c6806eef906f..59c80052e950 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -2032,10 +2032,22 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
+ struct dsa_switch *ds;
+ struct dsa_port *dp;
+ int err;
if (!dsa_slave_dev_check(dev))
return dsa_prevent_bridging_8021q_upper(dev, ptr);
+ dp = dsa_slave_to_port(dev);
+ ds = dp->ds;
+
+ if (ds->ops->port_prechangeupper) {
+ err = ds->ops->port_prechangeupper(ds, dp->index, info);
+ if (err)
+ return notifier_from_errno(err);
+ }
+
if (is_vlan_dev(info->upper_dev))
return dsa_slave_check_8021q_upper(dev, ptr);
break;
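The new ->port_prechangeupper() hook above lets a DSA driver veto an upper device before the core commits to it; a non-zero return is turned into a notifier error via notifier_from_errno(). A minimal hypothetical implementation, just to show the shape of the callback (the LAG policy is an example, not what any driver in this series does):

#include <linux/netdevice.h>
#include <net/dsa.h>

static int demo_port_prechangeupper(struct dsa_switch *ds, int port,
				    struct netdev_notifier_changeupper_info *info)
{
	/* Example policy: this switch cannot offload link aggregation,
	 * so reject the upper before the stack proceeds.
	 */
	if (info->linking && netif_is_lag_master(info->upper_dev))
		return -EOPNOTSUPP;

	return 0;
}

static const struct dsa_switch_ops demo_switch_ops = {
	/* mandatory ops omitted from this sketch */
	.port_prechangeupper	= demo_port_prechangeupper,
};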
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
new file mode 100644
index 000000000000..2061de06eafb
--- /dev/null
+++ b/net/dsa/tag_hellcreek.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * net/dsa/tag_hellcreek.c - Hirschmann Hellcreek switch tag format handling
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ *
+ * Based on tag_ksz.c.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+#define HELLCREEK_TAG_LEN 1
+
+static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ u8 *tag;
+
+ /* Tag encoding */
+ tag = skb_put(skb, HELLCREEK_TAG_LEN);
+ *tag = BIT(dp->index);
+
+ return skb;
+}
+
+static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ /* Tag decoding */
+ u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN;
+ unsigned int port = tag[0] & 0x03;
+
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev) {
+ netdev_warn(dev, "Failed to get source port: %d\n", port);
+ return NULL;
+ }
+
+ pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN);
+
+ skb->offload_fwd_mark = true;
+
+ return skb;
+}
+
+static const struct dsa_device_ops hellcreek_netdev_ops = {
+ .name = "hellcreek",
+ .proto = DSA_TAG_PROTO_HELLCREEK,
+ .xmit = hellcreek_xmit,
+ .rcv = hellcreek_rcv,
+ .overhead = HELLCREEK_TAG_LEN,
+ .tail_tag = true,
+};
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_HELLCREEK);
+
+module_dsa_tag_driver(hellcreek_netdev_ops);
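Two details of the tail tag worth spelling out: .overhead = HELLCREEK_TAG_LEN together with .tail_tag = true makes the DSA core budget one extra byte at the end of each frame rather than in front of the Ethernet header, and the tag byte is asymmetric. hellcreek_xmit() writes BIT(port), which reads like a destination-port bitmask for the switch, while hellcreek_rcv() takes the low two bits of the trailing byte as the source port number; that asymmetry mirrors tag_ksz.c, on which this driver says it is based. A trivial standalone illustration of the two encodings:

#include <stdio.h>

int main(void)
{
	unsigned int port;
	unsigned char rx_tag = 0x02;	/* example trailer byte from the switch */

	/* TX: hellcreek_xmit() appends BIT(port). */
	for (port = 0; port < 4; port++)
		printf("xmit on port %u -> tag byte 0x%02x\n", port, 1u << port);

	/* RX: hellcreek_rcv() recovers the source port from the low two bits. */
	printf("rcv tag byte 0x%02x -> source port %u\n", rx_tag, rx_tag & 0x03);
	return 0;
}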
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 9dcfa4e461b6..04e5e0bfd86a 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -12,6 +12,128 @@
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
+static int nf_reject_iphdr_validate(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return 0;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len)
+ return 0;
+ else if (len < (iph->ihl*4))
+ return 0;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return 0;
+
+ return 1;
+}
+
+struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ const struct tcphdr *oth;
+ struct sk_buff *nskb;
+ struct iphdr *niph;
+ struct tcphdr _oth;
+
+ if (!nf_reject_iphdr_validate(oldskb))
+ return NULL;
+
+ oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+ if (!oth)
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+ net->ipv4.sysctl_ip_default_ttl);
+ nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
+
+struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+ struct iphdr *niph;
+ struct icmphdr *icmph;
+ unsigned int len;
+ __wsum csum;
+ u8 proto;
+
+ if (!nf_reject_iphdr_validate(oldskb))
+ return NULL;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return NULL;
+
+ /* RFC says return as much as we can without exceeding 576 bytes. */
+ len = min_t(unsigned int, 536, oldskb->len);
+
+ if (!pskb_may_pull(oldskb, len))
+ return NULL;
+
+ if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
+ return NULL;
+
+ proto = ip_hdr(oldskb)->protocol;
+
+ if (!skb_csum_unnecessary(oldskb) &&
+ nf_reject_verify_csum(proto) &&
+ nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+ LL_MAX_HEADER + len, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+ net->ipv4.sysctl_ip_default_ttl);
+
+ skb_reset_transport_header(nskb);
+ icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
+ icmph->type = ICMP_DEST_UNREACH;
+ icmph->code = code;
+
+ skb_put_data(nskb, skb_network_header(oldskb), len);
+
+ csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+ icmph->checksum = csum_fold(csum);
+
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
+
const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *_oth, int hook)
{
@@ -124,7 +246,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
if (!oth)
return;
- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb))
+ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+ nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -193,7 +316,8 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
if (iph->frag_off & htons(IP_OFFSET))
return;
- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in))
+ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+ nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bf48cd73e967..9abf5a0358d5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1569,6 +1569,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
if (!buff)
return -ENOMEM; /* We'll just try again later. */
skb_copy_decrypted(buff, skb);
+ mptcp_skb_ext_copy(buff, skb);
sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
@@ -2123,6 +2124,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
if (unlikely(!buff))
return -ENOMEM;
skb_copy_decrypted(buff, skb);
+ mptcp_skb_ext_copy(buff, skb);
sk_wmem_queued_add(sk, buff->truesize);
sk_mem_charge(sk, buff->truesize);
@@ -2393,6 +2395,7 @@ static int tcp_mtu_probe(struct sock *sk)
skb = tcp_send_head(sk);
skb_copy_decrypted(nskb, skb);
+ mptcp_skb_ext_copy(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 4aef6baaa55e..aa35e6e37c1f 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -12,6 +12,140 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
+static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int thoff;
+ __be16 fo;
+ u8 proto = ip6h->nexthdr;
+
+ if (skb_csum_unnecessary(skb))
+ return true;
+
+ if (ip6h->payload_len &&
+ pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+ thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+ if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+ return false;
+
+ if (!nf_reject_verify_csum(proto))
+ return true;
+
+ return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
+}
+
+static int nf_reject_ip6hdr_validate(struct sk_buff *skb)
+{
+ struct ipv6hdr *hdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+
+ hdr = ipv6_hdr(skb);
+ if (hdr->version != 6)
+ return 0;
+
+ pkt_len = ntohs(hdr->payload_len);
+ if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
+ return 0;
+
+ return 1;
+}
+
+struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+ const struct tcphdr *oth;
+ struct tcphdr _oth;
+ unsigned int otcplen;
+ struct ipv6hdr *nip6h;
+
+ if (!nf_reject_ip6hdr_validate(oldskb))
+ return NULL;
+
+ oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+ if (!oth)
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+ net->ipv6.devconf_all->hop_limit);
+ nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+ nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset);
+
+struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *nip6h;
+ struct icmp6hdr *icmp6h;
+ unsigned int len;
+
+ if (!nf_reject_ip6hdr_validate(oldskb))
+ return NULL;
+
+ /* Include "As much of invoking packet as possible without the ICMPv6
+ * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+ */
+ len = min_t(unsigned int, 1220, oldskb->len);
+
+ if (!pskb_may_pull(oldskb, len))
+ return NULL;
+
+ if (!nf_reject_v6_csum_ok(oldskb, hook))
+ return NULL;
+
+ nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
+ LL_MAX_HEADER + len, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ nskb->dev = (struct net_device *)dev;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+ nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+ net->ipv6.devconf_all->hop_limit);
+
+ skb_reset_transport_header(nskb);
+ icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
+ icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+ icmp6h->icmp6_code = code;
+
+ skb_put_data(nskb, skb_network_header(oldskb), len);
+ nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+ icmp6h->icmp6_cksum =
+ csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+ nskb->len - sizeof(struct ipv6hdr),
+ IPPROTO_ICMPV6,
+ csum_partial(icmp6h,
+ nskb->len - sizeof(struct ipv6hdr),
+ 0));
+
+ return nskb;
+}
+EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach);
+
const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
struct tcphdr *otcph,
unsigned int *otcplen, int hook)
@@ -170,7 +304,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
- if (hook == NF_INET_PRE_ROUTING) {
+ if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (!dst)
return;
@@ -268,7 +402,8 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
+ if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+ nf_reject6_fill_skb_dst(skb_in) < 0)
return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index f2868a8a50c3..47bab701555f 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -377,6 +377,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (!pskb_may_pull(skb, sizeof(*hdr)))
goto err;
+ skb_dst_drop(skb);
+
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 54b888f94009..96ba616f59bf 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -18,6 +18,7 @@ struct mptcp_pernet {
struct ctl_table_header *ctl_table_hdr;
int mptcp_enabled;
+ unsigned int add_addr_timeout;
};
static struct mptcp_pernet *mptcp_get_pernet(struct net *net)
@@ -30,6 +31,11 @@ int mptcp_is_enabled(struct net *net)
return mptcp_get_pernet(net)->mptcp_enabled;
}
+unsigned int mptcp_get_add_addr_timeout(struct net *net)
+{
+ return mptcp_get_pernet(net)->add_addr_timeout;
+}
+
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -40,12 +46,19 @@ static struct ctl_table mptcp_sysctl_table[] = {
*/
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "add_addr_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{}
};
static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
+ pernet->add_addr_timeout = TCP_RTO_MAX;
}
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -61,6 +74,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
}
table[0].data = &pernet->mptcp_enabled;
+ table[1].data = &pernet->add_addr_timeout;
hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
if (!hdr)
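
A usage sketch for the knob added above (an illustration only, assuming MPTCP_SYSCTL_PATH resolves to "net/mptcp"; proc_dointvec_jiffies exposes the value in seconds and stores it as jiffies):

    # read the current ADD_ADDR retransmission timeout; the default is TCP_RTO_MAX, i.e. 120 seconds
    sysctl net.mptcp.add_addr_timeout
    # hypothetical tuning: retransmit an unacknowledged ADD_ADDR after 30 seconds instead
    sysctl -w net.mptcp.add_addr_timeout=30
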
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 0d6f3d912891..ed60538df7b2 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -206,6 +206,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
+ struct net *net = sock_net(sk);
pr_debug("msk=%p", msk);
@@ -232,7 +233,8 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
}
if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
- sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX);
+ sk_reset_timer(sk, timer,
+ jiffies + mptcp_get_add_addr_timeout(net));
spin_unlock_bh(&msk->pm.lock);
@@ -264,6 +266,7 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
+ struct net *net = sock_net(sk);
if (lookup_anno_list_by_saddr(msk, &entry->addr))
return false;
@@ -279,7 +282,8 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry->retrans_times = 0;
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
- sk_reset_timer(sk, &add_entry->add_timer, jiffies + TCP_RTO_MAX);
+ sk_reset_timer(sk, &add_entry->add_timer,
+ jiffies + mptcp_get_add_addr_timeout(net));
return true;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e7419fd15d84..b84b84adc9ad 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -466,6 +466,18 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct tcp_sock *tp;
u32 old_copied_seq;
bool done = false;
+ int sk_rbuf;
+
+ sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+
+ if (unlikely(ssk_rbuf > sk_rbuf)) {
+ WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
+ sk_rbuf = ssk_rbuf;
+ }
+ }
pr_debug("msk=%p ssk=%p", msk, ssk);
tp = tcp_sk(ssk);
@@ -528,7 +540,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
WRITE_ONCE(tp->copied_seq, seq);
more_data_avail = mptcp_subflow_data_available(ssk);
- if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
+ if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
done = true;
break;
}
@@ -622,6 +634,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
+ int sk_rbuf, ssk_rbuf;
bool wake;
/* move_skbs_to_msk below can legitly clear the data_avail flag,
@@ -632,12 +645,16 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (wake)
set_bit(MPTCP_DATA_READY, &msk->flags);
- if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
- move_skbs_to_msk(msk, ssk))
+ ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+ sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+ if (unlikely(ssk_rbuf > sk_rbuf))
+ sk_rbuf = ssk_rbuf;
+
+ /* over limit? can't append more skbs to msk */
+ if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
goto wake;
- /* don't schedule if mptcp sk is (still) over limit */
- if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
+ if (move_skbs_to_msk(msk, ssk))
goto wake;
/* mptcp socket is owned, release_cb should retry */
@@ -754,8 +771,11 @@ static bool mptcp_skb_can_collapse_to(u64 write_seq,
if (!tcp_skb_can_collapse_to(skb))
return false;
- /* can collapse only if MPTCP level sequence is in order */
- return mpext && mpext->data_seq + mpext->data_len == write_seq;
+ /* can collapse only if MPTCP level sequence is in order and this
+ * mapping has not been xmitted yet
+ */
+ return mpext && mpext->data_seq + mpext->data_len == write_seq &&
+ !mpext->frozen;
}
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
@@ -833,19 +853,25 @@ static void mptcp_clean_una(struct sock *sk)
}
out:
- if (cleaned) {
+ if (cleaned)
sk_mem_reclaim_partial(sk);
+}
- /* Only wake up writers if a subflow is ready */
- if (mptcp_is_writeable(msk)) {
- set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
- smp_mb__after_atomic();
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
- /* set SEND_SPACE before sk_stream_write_space clears
- * NOSPACE
- */
- sk_stream_write_space(sk);
- }
+ mptcp_clean_una(sk);
+
+ /* Only wake up writers if a subflow is ready */
+ if (mptcp_is_writeable(msk)) {
+ set_bit(MPTCP_SEND_SPACE, &msk->flags);
+ smp_mb__after_atomic();
+
+ /* set SEND_SPACE before sk_stream_write_space clears
+ * NOSPACE
+ */
+ sk_stream_write_space(sk);
}
}
@@ -1476,13 +1502,14 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
__mptcp_flush_join_list(msk);
do {
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
+ bool slowpath;
if (!ssk)
break;
- lock_sock(ssk);
+ slowpath = lock_sock_fast(ssk);
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
- release_sock(ssk);
+ unlock_sock_fast(ssk, slowpath);
} while (!done);
if (mptcp_ofo_queue(msk) || moved > 0) {
@@ -1748,7 +1775,7 @@ static void mptcp_worker(struct work_struct *work)
long timeo = 0;
lock_sock(sk);
- mptcp_clean_una(sk);
+ mptcp_clean_una_wakeup(sk);
mptcp_check_data_fin_ack(sk);
__mptcp_flush_join_list(msk);
if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 13ab89dc1914..278c88c405e8 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -362,6 +362,7 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
}
int mptcp_is_enabled(struct net *net);
+unsigned int mptcp_get_add_addr_timeout(struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 52370211e46b..49fbef0d99be 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -682,6 +682,16 @@ config NFT_FIB_NETDEV
The lookup will be delegated to the IPv4 or IPv6 FIB depending
on the protocol of the packet.
+config NFT_REJECT_NETDEV
+ depends on NFT_REJECT_IPV4
+ depends on NFT_REJECT_IPV6
+ tristate "Netfilter nf_tables netdev REJECT support"
+ help
+ This option enables REJECT support for the netdev table.
+ Return packet generation is delegated to the IPv4 or IPv6
+ ICMP or TCP RST implementation, depending on the protocol
+ of the packet.
+
endif # NF_TABLES_NETDEV
endif # NF_TABLES
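
A ruleset sketch of what the option above enables once nft_reject_netdev is loaded (standard nft command syntax; the device name and ports are placeholders):

    # attach a base chain to the ingress hook of eth0 in the netdev family
    nft add table netdev filter
    nft add chain netdev filter ingress '{ type filter hook ingress device eth0 priority 0; }'
    # reject SMTP with a TCP RST, NetBIOS name service with a family-agnostic port-unreachable
    nft add rule netdev filter ingress tcp dport 25 reject with tcp reset
    nft add rule netdev filter ingress udp dport 137 reject with icmpx type port-unreachable
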
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 0e0ded87e27b..33da7bf1b68e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
+obj-$(CONFIG_NFT_REJECT_NETDEV) += nft_reject_netdev.o
obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 6f35832f0de3..e76bfca2d3ef 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1109,6 +1109,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
ret = -IPSET_ERR_PROTOCOL;
goto put_out;
}
+ /* Set create flags depending on the type revision */
+ set->flags |= set->type->create_flags[revision];
ret = set->type->create(net, set, tb, flags);
if (ret != 0)
@@ -1239,10 +1241,12 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
+ u32 flags = flag_exist(nlh);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
- ret = -ENOENT;
+ if (!(flags & IPSET_FLAG_EXIST))
+ ret = -ENOENT;
goto out;
} else if (s->ref || s->ref_netlink) {
ret = -IPSET_ERR_BUSY;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 521e970be402..5f1208ad049e 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -37,18 +37,18 @@
*/
/* Number of elements to store in an initial array block */
-#define AHASH_INIT_SIZE 4
+#define AHASH_INIT_SIZE 2
/* Max number of elements to store in an array block */
-#define AHASH_MAX_SIZE (3 * AHASH_INIT_SIZE)
+#define AHASH_MAX_SIZE (6 * AHASH_INIT_SIZE)
/* Max number of elements in the array block when tuned */
#define AHASH_MAX_TUNED 64
+#define AHASH_MAX(h) ((h)->bucketsize)
+
/* Max number of elements can be tuned */
#ifdef IP_SET_HASH_WITH_MULTI
-#define AHASH_MAX(h) ((h)->ahash_max)
-
static u8
-tune_ahash_max(u8 curr, u32 multi)
+tune_bucketsize(u8 curr, u32 multi)
{
u32 n;
@@ -61,12 +61,10 @@ tune_ahash_max(u8 curr, u32 multi)
*/
return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
}
-
-#define TUNE_AHASH_MAX(h, multi) \
- ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi))
+#define TUNE_BUCKETSIZE(h, multi) \
+ ((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi))
#else
-#define AHASH_MAX(h) AHASH_MAX_SIZE
-#define TUNE_AHASH_MAX(h, multi)
+#define TUNE_BUCKETSIZE(h, multi)
#endif
/* A hash bucket */
@@ -321,9 +319,7 @@ struct htype {
#ifdef IP_SET_HASH_WITH_MARKMASK
u32 markmask; /* markmask value for mark mask to store */
#endif
-#ifdef IP_SET_HASH_WITH_MULTI
- u8 ahash_max; /* max elements in an array block */
-#endif
+ u8 bucketsize; /* max elements in an array block */
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
@@ -950,7 +946,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
goto set_full;
/* Create a new slot */
if (n->pos >= n->size) {
- TUNE_AHASH_MAX(h, multi);
+ TUNE_BUCKETSIZE(h, multi);
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
@@ -1305,6 +1301,11 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
goto nla_put_failure;
#endif
+ if (set->flags & IPSET_CREATE_FLAG_BUCKETSIZE) {
+ if (nla_put_u8(skb, IPSET_ATTR_BUCKETSIZE, h->bucketsize) ||
+ nla_put_net32(skb, IPSET_ATTR_INITVAL, htonl(h->initval)))
+ goto nla_put_failure;
+ }
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
@@ -1547,8 +1548,20 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#ifdef IP_SET_HASH_WITH_MARKMASK
h->markmask = markmask;
#endif
- get_random_bytes(&h->initval, sizeof(h->initval));
-
+ if (tb[IPSET_ATTR_INITVAL])
+ h->initval = ntohl(nla_get_be32(tb[IPSET_ATTR_INITVAL]));
+ else
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->bucketsize = AHASH_MAX_SIZE;
+ if (tb[IPSET_ATTR_BUCKETSIZE]) {
+ h->bucketsize = nla_get_u8(tb[IPSET_ATTR_BUCKETSIZE]);
+ if (h->bucketsize < AHASH_INIT_SIZE)
+ h->bucketsize = AHASH_INIT_SIZE;
+ else if (h->bucketsize > AHASH_MAX_SIZE)
+ h->bucketsize = AHASH_MAX_SIZE;
+ else if (h->bucketsize % 2)
+ h->bucketsize += 1;
+ }
t->htable_bits = hbits;
t->maxelem = h->maxelem / ahash_numof_locks(hbits);
RCU_INIT_POINTER(h->table, t);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5d6d68eaf6a9..d1bef23fd4f5 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -23,7 +23,8 @@
/* 1 Counters support */
/* 2 Comments support */
/* 3 Forceadd support */
-#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */
+/* 4 skbinfo support */
+#define IPSET_TYPE_REV_MAX 5 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -277,11 +278,13 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index eceb7bc4a93a..467c59a83c0a 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -23,7 +23,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 0
+#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Chilinski <tomasz.chilinski@chilan.com>");
@@ -268,11 +268,13 @@ static struct ip_set_type hash_ipmac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index aba1df617d6e..18346d18aa16 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -21,7 +21,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support */
+/* 2 skbinfo support */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vytas Dauksa <vytas.dauksa@smoothwall.net>");
@@ -274,12 +275,14 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipmark_create,
.create_policy = {
[IPSET_ATTR_MARKMASK] = { .type = NLA_U32 },
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 1ff228717e29..e1ca11196515 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
+/* 5 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -341,11 +342,13 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index fa88afd812fa..ab179e064597 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -25,7 +25,8 @@
/* 2 Counters support added */
/* 3 Comments support added */
/* 4 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
+/* 5 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 6 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -356,11 +357,13 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportip_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index eef6ecfcb409..8f075b44cf64 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -27,7 +27,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
+/* 7 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -513,11 +514,13 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index 0b61593165ef..718814730acf 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -16,7 +16,7 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 0
+#define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -125,11 +125,13 @@ static struct ip_set_type hash_mac_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_mac_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 136cf0781d3a..c1a11f041ac6 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -24,7 +24,8 @@
/* 3 Counters support added */
/* 4 Comments support added */
/* 5 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 6 /* skbinfo mapping support added */
+/* 6 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 7 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -354,11 +355,13 @@ static struct ip_set_type hash_net_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index be5e95a0d876..3d74169b794c 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -26,7 +26,8 @@
/* 4 Comments support added */
/* 5 Forceadd support added */
/* 6 skbinfo support added */
-#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */
+/* 7 interface wildcard support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -470,11 +471,13 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index da4ef910b12d..6532f0505e66 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -22,7 +22,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
+/* 2 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -459,11 +460,13 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 34448df80fb9..ec1564a1cb5a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -26,7 +26,8 @@
/* 4 Counters support added */
/* 5 Comments support added */
/* 6 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
+/* 7 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
@@ -460,11 +461,13 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 934c1712cba8..0e91d1e82f1c 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -23,7 +23,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
/* 1 Forceadd support added */
-#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
+/* 2 skbinfo support added */
+#define IPSET_TYPE_REV_MAX 3 /* bucketsize, initval support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -558,11 +559,13 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
.family = NFPROTO_UNSPEC,
.revision_min = IPSET_TYPE_REV_MIN,
.revision_max = IPSET_TYPE_REV_MAX,
+ .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
.create = hash_netportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
[IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
- [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_INITVAL] = { .type = NLA_U32 },
+ [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 },
[IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 65cb8e3c13d9..522a9d28754b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -581,7 +581,8 @@ struct nft_module_request {
};
#ifdef CONFIG_MODULES
-static int nft_request_module(struct net *net, const char *fmt, ...)
+static __printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
+ ...)
{
char module_name[MODULE_NAME_LEN];
struct nft_module_request *req;
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 61fb7e8afbf0..927ff8459bd9 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -40,6 +40,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
+ int icmp_code;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@@ -47,9 +48,17 @@ int nft_reject_init(const struct nft_ctx *ctx,
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
- priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+
+ icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+ if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+ icmp_code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ priv->icmp_code = icmp_code;
+ break;
case NFT_REJECT_TCP_RST:
break;
default:
@@ -69,6 +78,7 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index cf8f2646e93c..32f3ea398ddf 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -58,60 +58,16 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
regs->verdict.code = NF_DROP;
}
-static int nft_reject_inet_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_reject_inet_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
- struct nft_reject *priv = nft_expr_priv(expr);
- int icmp_code;
-
- if (tb[NFTA_REJECT_TYPE] == NULL)
- return -EINVAL;
-
- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
- return -EINVAL;
-
- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
- icmp_code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
-
- priv->icmp_code = icmp_code;
- break;
- case NFT_REJECT_TCP_RST:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int nft_reject_inet_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_reject *priv = nft_expr_priv(expr);
-
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
- goto nla_put_failure;
-
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- case NFT_REJECT_ICMPX_UNREACH:
- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
- goto nla_put_failure;
- break;
- default:
- break;
- }
-
- return 0;
-
-nla_put_failure:
- return -1;
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_INGRESS));
}
static struct nft_expr_type nft_reject_inet_type;
@@ -119,9 +75,9 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.type = &nft_reject_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_inet_eval,
- .init = nft_reject_inet_init,
- .dump = nft_reject_inet_dump,
- .validate = nft_reject_validate,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+ .validate = nft_reject_inet_validate,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/netfilter/nft_reject_netdev.c b/net/netfilter/nft_reject_netdev.c
new file mode 100644
index 000000000000..d89f68754f42
--- /dev/null
+++ b/net/netfilter/nft_reject_netdev.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Laura Garcia Liebana <nevola@gmail.com>
+ * Copyright (c) 2020 Jose M. Guisado <guigom@riseup.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
+static void nft_reject_queue_xmit(struct sk_buff *nskb, struct sk_buff *oldskb)
+{
+ dev_hard_header(nskb, nskb->dev, ntohs(oldskb->protocol),
+ eth_hdr(oldskb)->h_source, eth_hdr(oldskb)->h_dest,
+ nskb->len);
+ dev_queue_xmit(nskb);
+}
+
+static void nft_reject_netdev_send_v4_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_send_v4_unreach(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_send_v6_tcp_reset(struct net *net,
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+
+ struct sk_buff *oldskb,
+ const struct net_device *dev,
+ int hook, u8 code)
+{
+ struct sk_buff *nskb;
+
+ nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
+ if (!nskb)
+ return;
+
+ nft_reject_queue_xmit(nskb, oldskb);
+}
+
+static void nft_reject_netdev_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct ethhdr *eth = eth_hdr(pkt->skb);
+ struct nft_reject *priv = nft_expr_priv(expr);
+ const unsigned char *dest = eth->h_dest;
+
+ if (is_broadcast_ether_addr(dest) ||
+ is_multicast_ether_addr(dest))
+ goto out;
+
+ switch (eth->h_proto) {
+ case htons(ETH_P_IP):
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nft_reject_netdev_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt));
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nft_reject_netdev_send_v4_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ nft_reject_icmp_code(priv->icmp_code));
+ break;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nft_reject_netdev_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt));
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nft_reject_netdev_send_v6_unreach(nft_net(pkt), pkt->skb,
+ nft_in(pkt),
+ nft_hook(pkt),
+ nft_reject_icmpv6_code(priv->icmp_code));
+ break;
+ }
+ break;
+ default:
+ /* No explicit way to reject this protocol, drop it. */
+ break;
+ }
+out:
+ regs->verdict.code = NF_DROP;
+}
+
+static int nft_reject_netdev_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+}
+
+static struct nft_expr_type nft_reject_netdev_type;
+static const struct nft_expr_ops nft_reject_netdev_ops = {
+ .type = &nft_reject_netdev_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_netdev_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+ .validate = nft_reject_netdev_validate,
+};
+
+static struct nft_expr_type nft_reject_netdev_type __read_mostly = {
+ .family = NFPROTO_NETDEV,
+ .name = "reject",
+ .ops = &nft_reject_netdev_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_netdev_module_init(void)
+{
+ return nft_register_expr(&nft_reject_netdev_type);
+}
+
+static void __exit nft_reject_netdev_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_netdev_type);
+}
+
+module_init(nft_reject_netdev_module_init);
+module_exit(nft_reject_netdev_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Laura Garcia Liebana <nevola@gmail.com>");
+MODULE_AUTHOR("Jose M. Guisado <guigom@riseup.net>");
+MODULE_DESCRIPTION("Reject packets from netdev via nftables");
+MODULE_ALIAS_NFT_AF_EXPR(5, "reject");
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f66417d5d2c3..1341c59c2f40 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -215,6 +215,36 @@ static size_t tcf_action_fill_size(const struct tc_action *act)
return sz;
}
+static int
+tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_cookie *cookie;
+
+ if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+ goto nla_put_failure;
+ if (tcf_action_copy_stats(skb, a, 0))
+ goto nla_put_failure;
+ if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
+ goto nla_put_failure;
+
+ rcu_read_lock();
+ cookie = rcu_dereference(a->act_cookie);
+ if (cookie) {
+ if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
+ rcu_read_unlock();
+ goto nla_put_failure;
+ }
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -248,7 +278,9 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
index--;
goto nla_put_failure;
}
- err = tcf_action_dump_1(skb, p, 0, 0);
+ err = (act_flags & TCA_FLAG_TERSE_DUMP) ?
+ tcf_action_dump_terse(skb, p, true) :
+ tcf_action_dump_1(skb, p, 0, 0);
if (err < 0) {
index--;
nlmsg_trim(skb, nest);
@@ -752,34 +784,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return a->ops->dump(skb, a, bind, ref);
}
-static int
-tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_cookie *cookie;
-
- if (nla_put_string(skb, TCA_KIND, a->ops->kind))
- goto nla_put_failure;
- if (tcf_action_copy_stats(skb, a, 0))
- goto nla_put_failure;
-
- rcu_read_lock();
- cookie = rcu_dereference(a->act_cookie);
- if (cookie) {
- if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
- rcu_read_unlock();
- goto nla_put_failure;
- }
- }
- rcu_read_unlock();
-
- return 0;
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
@@ -787,7 +791,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- if (tcf_action_dump_terse(skb, a))
+ if (tcf_action_dump_terse(skb, a, false))
goto nla_put_failure;
if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
@@ -832,7 +836,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
nest = nla_nest_start_noflag(skb, i + 1);
if (nest == NULL)
goto nla_put_failure;
- err = terse ? tcf_action_dump_terse(skb, a) :
+ err = terse ? tcf_action_dump_terse(skb, a, false) :
tcf_action_dump_1(skb, a, bind, ref);
if (err < 0)
goto errout;
@@ -1469,7 +1473,8 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
}
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
- [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON),
+ [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON |
+ TCA_FLAG_TERSE_DUMP),
[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 814754d7cab5..c3e89c776e66 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -1074,7 +1074,7 @@ static struct inet_protosw sctpv6_stream_protosw = {
static int sctp6_rcv(struct sk_buff *skb)
{
- memset(skb->cb, 0, sizeof(skb->cb));
+ SCTP_INPUT_CB(skb)->encap_port = 0;
return sctp_rcv(skb) ? -1 : 0;
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 41f287a13b54..6f2bbfeec3a4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -843,7 +843,6 @@ static int sctp_ctl_sock_init(struct net *net)
static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
{
- memset(skb->cb, 0, sizeof(skb->cb));
SCTP_INPUT_CB(skb)->encap_port = udp_hdr(skb)->source;
skb_set_transport_header(skb, sizeof(struct udphdr));
@@ -1163,7 +1162,7 @@ static struct inet_protosw sctp_stream_protosw = {
static int sctp4_rcv(struct sk_buff *skb)
{
- memset(skb->cb, 0, sizeof(skb->cb));
+ SCTP_INPUT_CB(skb)->encap_port = 0;
return sctp_rcv(skb);
}
diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
index 0e71abdd7a03..675eff45b037 100755
--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
@@ -105,38 +105,6 @@ cleanup()
vrf_cleanup
}
-# return 0 if the packet wasn't seen on host2_if or 1 if it was
-mcast_packet_test()
-{
- local mac=$1
- local src_ip=$2
- local ip=$3
- local host1_if=$4
- local host2_if=$5
- local seen=0
-
- # Add an ACL on `host2_if` which will tell us whether the packet
- # was received by it or not.
- tc qdisc add dev $host2_if ingress
- tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \
- flower ip_proto udp dst_mac $mac action drop
-
- $MZ $host1_if -c 1 -p 64 -b $mac -A $src_ip -B $ip -t udp "dp=4096,sp=2048" -q
- sleep 1
-
- tc -j -s filter show dev $host2_if ingress \
- | jq -e ".[] | select(.options.handle == 101) \
- | select(.options.actions[0].stats.packets == 1)" &> /dev/null
- if [[ $? -eq 0 ]]; then
- seen=1
- fi
-
- tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower
- tc qdisc del dev $host2_if ingress
-
- return $seen
-}
-
v2reportleave_test()
{
RET=0
@@ -169,73 +137,6 @@ v2reportleave_test()
log_test "IGMPv2 leave $TEST_GROUP"
}
-check_sg_entries()
-{
- local report=$1; shift
- local slist=("$@")
- local sarg=""
-
- for src in "${slist[@]}"; do
- sarg="${sarg} and .source_list[].address == \"$src\""
- done
- bridge -j -d -s mdb show dev br0 \
- | jq -e ".[].mdb[] | \
- select(.grp == \"$TEST_GROUP\" and .source_list != null $sarg)" &>/dev/null
- check_err $? "Wrong *,G entry source list after $report report"
-
- for sgent in "${slist[@]}"; do
- bridge -j -d -s mdb show dev br0 \
- | jq -e ".[].mdb[] | \
- select(.grp == \"$TEST_GROUP\" and .src == \"$sgent\")" &>/dev/null
- check_err $? "Missing S,G entry ($sgent, $TEST_GROUP)"
- done
-}
-
-check_sg_fwding()
-{
- local should_fwd=$1; shift
- local sources=("$@")
-
- for src in "${sources[@]}"; do
- local retval=0
-
- mcast_packet_test $TEST_GROUP_MAC $src $TEST_GROUP $h2 $h1
- retval=$?
- if [ $should_fwd -eq 1 ]; then
- check_fail $retval "Didn't forward traffic from S,G ($src, $TEST_GROUP)"
- else
- check_err $retval "Forwarded traffic for blocked S,G ($src, $TEST_GROUP)"
- fi
- done
-}
-
-check_sg_state()
-{
- local is_blocked=$1; shift
- local sources=("$@")
- local should_fail=1
-
- if [ $is_blocked -eq 1 ]; then
- should_fail=0
- fi
-
- for src in "${sources[@]}"; do
- bridge -j -d -s mdb show dev br0 \
- | jq -e ".[].mdb[] | \
- select(.grp == \"$TEST_GROUP\" and .source_list != null) |
- .source_list[] |
- select(.address == \"$src\") |
- select(.timer == \"0.00\")" &>/dev/null
- check_err_fail $should_fail $? "Entry $src has zero timer"
-
- bridge -j -d -s mdb show dev br0 \
- | jq -e ".[].mdb[] | \
- select(.grp == \"$TEST_GROUP\" and .src == \"$src\" and \
- .flags[] == \"blocked\")" &>/dev/null
- check_err_fail $should_fail $? "Entry $src has blocked flag"
- done
-}
-
v3include_prepare()
{
local host1_if=$1
@@ -257,7 +158,7 @@ v3include_prepare()
select(.grp == \"$TEST_GROUP\" and \
.source_list != null and .filter_mode == \"include\")" &>/dev/null
check_err $? "Wrong *,G entry filter mode"
- check_sg_entries "is_include" "${X[@]}"
+ brmcast_check_sg_entries "is_include" "${X[@]}"
}
v3exclude_prepare()
@@ -279,10 +180,10 @@ v3exclude_prepare()
.source_list != null and .filter_mode == \"exclude\")" &>/dev/null
check_err $? "Wrong *,G entry filter mode"
- check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
bridge -j -d -s mdb show dev br0 \
| jq -e ".[].mdb[] | \
@@ -308,10 +209,10 @@ v3include_test()
v3include_prepare $h1 $ALL_MAC $ALL_GROUP
- check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
- check_sg_fwding 1 "${X[@]}"
- check_sg_fwding 0 "192.0.2.100"
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "192.0.2.100"
log_test "IGMPv3 report $TEST_GROUP is_include"
@@ -327,12 +228,12 @@ v3inc_allow_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW" -q
sleep 1
- check_sg_entries "allow" "${X[@]}"
+ brmcast_check_sg_entries "allow" "${X[@]}"
- check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
- check_sg_fwding 1 "${X[@]}"
- check_sg_fwding 0 "192.0.2.100"
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "192.0.2.100"
log_test "IGMPv3 report $TEST_GROUP include -> allow"
@@ -348,12 +249,12 @@ v3inc_is_include_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_IS_INC2" -q
sleep 1
- check_sg_entries "is_include" "${X[@]}"
+ brmcast_check_sg_entries "is_include" "${X[@]}"
- check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
- check_sg_fwding 1 "${X[@]}"
- check_sg_fwding 0 "192.0.2.100"
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "192.0.2.100"
log_test "IGMPv3 report $TEST_GROUP include -> is_include"
@@ -366,8 +267,8 @@ v3inc_is_exclude_test()
v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP include -> is_exclude"
@@ -393,10 +294,10 @@ v3inc_to_exclude_test()
.source_list != null and .filter_mode == \"exclude\")" &>/dev/null
check_err $? "Wrong *,G entry filter mode"
- check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
bridge -j -d -s mdb show dev br0 \
| jq -e ".[].mdb[] | \
@@ -411,8 +312,8 @@ v3inc_to_exclude_test()
.source_list[].address == \"192.0.2.21\")" &>/dev/null
check_fail $? "Wrong *,G entry source list, 192.0.2.21 entry still exists"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP include -> to_exclude"
@@ -431,13 +332,13 @@ v3exc_allow_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q
sleep 1
- check_sg_entries "allow" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "allow" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP exclude -> allow"
@@ -454,13 +355,13 @@ v3exc_is_include_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_IS_INC3" -q
sleep 1
- check_sg_entries "is_include" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "is_include" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP exclude -> is_include"
@@ -477,13 +378,13 @@ v3exc_is_exclude_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_IS_EXC2" -q
sleep 1
- check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP exclude -> is_exclude"
@@ -503,13 +404,13 @@ v3exc_to_exclude_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_TO_EXC" -q
sleep 1
- check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP exclude -> to_exclude"
@@ -528,9 +429,9 @@ v3inc_block_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_BLOCK" -q
# make sure the lowered timers have expired (by default 2 seconds)
sleep 3
- check_sg_entries "block" "${X[@]}"
+ brmcast_check_sg_entries "block" "${X[@]}"
- check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
bridge -j -d -s mdb show dev br0 \
| jq -e ".[].mdb[] | \
@@ -539,8 +440,8 @@ v3inc_block_test()
.source_list[].address == \"192.0.2.1\")" &>/dev/null
check_fail $? "Wrong *,G entry source list, 192.0.2.1 entry still exists"
- check_sg_fwding 1 "${X[@]}"
- check_sg_fwding 0 "192.0.2.100"
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "192.0.2.100"
log_test "IGMPv3 report $TEST_GROUP include -> block"
@@ -560,13 +461,13 @@ v3exc_block_test()
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_BLOCK" -q
sleep 1
- check_sg_entries "block" "${X[@]}" "${Y[@]}"
+ brmcast_check_sg_entries "block" "${X[@]}" "${Y[@]}"
- check_sg_state 0 "${X[@]}"
- check_sg_state 1 "${Y[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
- check_sg_fwding 1 "${X[@]}" 192.0.2.100
- check_sg_fwding 0 "${Y[@]}"
+ brmcast_check_sg_fwding 1 "${X[@]}" 192.0.2.100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
log_test "IGMPv3 report $TEST_GROUP exclude -> block"
@@ -606,12 +507,12 @@ v3exc_timeout_test()
.source_list[].address == \"192.0.2.2\")" &>/dev/null
check_fail $? "Wrong *,G entry source list, 192.0.2.2 entry still exists"
- check_sg_entries "allow" "${X[@]}"
+ brmcast_check_sg_entries "allow" "${X[@]}"
- check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 0 "${X[@]}"
- check_sg_fwding 1 "${X[@]}"
- check_sg_fwding 0 192.0.2.100
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 192.0.2.100
log_test "IGMPv3 group $TEST_GROUP exclude timeout"
@@ -642,7 +543,7 @@ v3star_ex_auto_add_test()
.flags[] == \"added_by_star_ex\")" &>/dev/null
check_err $? "Auto-added S,G entry doesn't have added_by_star_ex flag"
- check_sg_fwding 1 192.0.2.3
+ brmcast_check_sg_fwding 1 192.0.2.3
log_test "IGMPv3 S,G port entry automatic add to a *,G port"
diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh
new file mode 100755
index 000000000000..ffdcfa87ca2b
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh
@@ -0,0 +1,558 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="mldv2include_test mldv2inc_allow_test mldv2inc_is_include_test mldv2inc_is_exclude_test \
+ mldv2inc_to_exclude_test mldv2exc_allow_test mldv2exc_is_include_test \
+ mldv2exc_is_exclude_test mldv2exc_to_exclude_test mldv2inc_block_test \
+ mldv2exc_block_test mldv2exc_timeout_test mldv2star_ex_auto_add_test"
+NUM_NETIFS=4
+CHECK_TC="yes"
+TEST_GROUP="ff02::cc"
+TEST_GROUP_MAC="33:33:00:00:00:cc"
+
+# MLDv2 is_in report: grp ff02::cc is_include 2001:db8:1::1,2001:db8:1::2,2001:db8:1::3
+MZPKT_IS_INC="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:54:00:01:fe:80:00:\
+00:00:00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:\
+00:05:02:00:00:00:00:8f:00:8e:d9:00:00:00:01:01:00:00:03:ff:02:00:00:00:00:00:00:00:00:00:\
+00:00:00:00:cc:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:01:20:01:0d:b8:00:01:00:00:00:\
+00:00:00:00:00:00:02:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:03"
+# MLDv2 is_in report: grp ff02::cc is_include 2001:db8:1::10,2001:db8:1::11,2001:db8:1::12
+MZPKT_IS_INC2="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:54:00:01:fe:80:00:\
+00:00:00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:\
+05:02:00:00:00:00:8f:00:8e:ac:00:00:00:01:01:00:00:03:ff:02:00:00:00:00:00:00:00:00:00:00:00:\
+00:00:cc:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:10:20:01:0d:b8:00:01:00:00:00:00:00:00:\
+00:00:00:11:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:12"
+# MLDv2 is_in report: grp ff02::cc is_include 2001:db8:1::20,2001:db8:1::30
+MZPKT_IS_INC3="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:44:00:01:fe:80:00:00:00:\
+00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:\
+00:00:00:8f:00:bc:5a:00:00:00:01:01:00:00:02:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:30"
+# MLDv2 allow report: grp ff02::cc allow 2001:db8:1::10,2001:db8:1::11,2001:db8:1::12
+MZPKT_ALLOW="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:54:00:01:fe:80:00:00:\
+00:00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:\
+02:00:00:00:00:8f:00:8a:ac:00:00:00:01:05:00:00:03:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:\
+00:cc:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:10:20:01:0d:b8:00:01:00:00:00:00:00:00:00:\
+00:00:11:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:12"
+# MLDv2 allow report: grp ff02::cc allow 2001:db8:1::20,2001:db8:1::30
+MZPKT_ALLOW2="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:44:00:01:fe:80:00:00:00:\
+00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:\
+00:00:00:8f:00:b8:5a:00:00:00:01:05:00:00:02:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:30"
+# MLDv2 is_ex report: grp ff02::cc is_exclude 2001:db8:1::1,2001:db8:1::2,2001:db8:1::20,2001:db8:1::21
+MZPKT_IS_EXC="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:64:00:01:fe:80:00:00:00:\
+00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:\
+00:00:00:8f:00:5f:d0:00:00:00:01:02:00:00:04:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:01:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:02:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:21"
+# MLDv2 is_ex report: grp ff02::cc is_exclude 2001:db8:1::20,2001:db8:1::30
+MZPKT_IS_EXC2="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:44:00:01:fe:80:00:00:00:\
+00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:\
+00:00:00:8f:00:bb:5a:00:00:00:01:02:00:00:02:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:30"
+# MLDv2 to_ex report: grp ff02::cc to_exclude 2001:db8:1::1,2001:db8:1::20,2001:db8:1::30
+MZPKT_TO_EXC="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:54:00:01:fe:80:00:00:00:\
+00:00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:\
+00:00:00:8f:00:8b:8e:00:00:00:01:04:00:00:03:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:01:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:\
+01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:30"
+# MLDv2 block report: grp ff02::cc block 2001:db8:1::1,2001:db8:1::20,2001:db8:1::30
+MZPKT_BLOCK="33:33:00:00:00:01:fe:54:00:04:5e:ba:86:dd:60:0a:2d:ae:00:54:00:01:fe:80:00:00:00:00:\
+00:00:fc:54:00:ff:fe:04:5e:ba:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:01:3a:00:05:02:00:00:\
+00:00:8f:00:89:8e:00:00:00:01:06:00:00:03:ff:02:00:00:00:00:00:00:00:00:00:00:00:00:00:cc:20:01:\
+0d:b8:00:01:00:00:00:00:00:00:00:00:00:01:20:01:0d:b8:00:01:00:00:00:00:00:00:00:00:00:20:20:01:\
+0d:b8:00:01:00:00:00:00:00:00:00:00:00:30"
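+
+# Each MZPKT_* blob above is a complete Ethernet frame in mausezahn hex
+# notation: destination and source MAC, EtherType 86:dd (IPv6), an IPv6
+# header with a Hop-by-Hop Router Alert extension, and an ICMPv6 type 0x8f
+# (MLDv2 Multicast Listener Report) message carrying the group records
+# described in the per-packet comments.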
+
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ # Note: bridge mcast_* intervals are specified in centiseconds (1/100 s),
+ # so 100 is a 1 second query response interval and 300 a 3 second startup
+ # query interval.
+ ip link add dev br0 type bridge mcast_snooping 1 mcast_query_response_interval 100 \
+ mcast_mld_version 2 mcast_startup_query_interval 300 \
+ mcast_querier 1
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+
+ # make sure a query has been generated
+ sleep 5
+}
+
+switch_destroy()
+{
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+mldv2include_prepare()
+{
+ local host1_if=$1
+ local X=("2001:db8:1::1" "2001:db8:1::2" "2001:db8:1::3")
+
+ ip link set dev br0 type bridge mcast_mld_version 2
+ check_err $? "Could not change bridge MLD version to 2"
+
+ $MZ $host1_if $MZPKT_IS_INC -q
+ sleep 1
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .source_list != null)" &>/dev/null
+ check_err $? "Missing *,G entry with source list"
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and .filter_mode == \"include\")" &>/dev/null
+ check_err $? "Wrong *,G entry filter mode"
+ brmcast_check_sg_entries "is_include" "${X[@]}"
+}
+
+mldv2exclude_prepare()
+{
+ local host1_if=$1
+ local mac=$2
+ local group=$3
+ local pkt=$4
+ local X=("2001:db8:1::1" "2001:db8:1::2")
+ local Y=("2001:db8:1::20" "2001:db8:1::21")
+
+ mldv2include_prepare $host1_if
+
+ $MZ $host1_if -c 1 $MZPKT_IS_EXC -q
+ sleep 1
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and .filter_mode == \"exclude\")" &>/dev/null
+ check_err $? "Wrong *,G entry filter mode"
+
+ brmcast_check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::3\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::3 entry still exists"
+}
+
+mldv2cleanup()
+{
+ local port=$1
+
+ bridge mdb del dev br0 port $port grp $TEST_GROUP
+ ip link set dev br0 type bridge mcast_mld_version 1
+}
+
+mldv2include_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::2" "2001:db8:1::3")
+
+ mldv2include_prepare $h1
+
+ brmcast_check_sg_state 0 "${X[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "2001:db8:1::100"
+
+ log_test "MLDv2 report $TEST_GROUP is_include"
+
+ mldv2cleanup $swp1
+}
+
+mldv2inc_allow_test()
+{
+ RET=0
+ local X=("2001:db8:1::10" "2001:db8:1::11" "2001:db8:1::12")
+
+ mldv2include_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_ALLOW -q
+ sleep 1
+ brmcast_check_sg_entries "allow" "${X[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "2001:db8:1::100"
+
+ log_test "MLDv2 report $TEST_GROUP include -> allow"
+
+ mldv2cleanup $swp1
+}
+
+mldv2inc_is_include_test()
+{
+ RET=0
+ local X=("2001:db8:1::10" "2001:db8:1::11" "2001:db8:1::12")
+
+ mldv2include_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_IS_INC2 -q
+ sleep 1
+ brmcast_check_sg_entries "is_include" "${X[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 "2001:db8:1::100"
+
+ log_test "MLDv2 report $TEST_GROUP include -> is_include"
+
+ mldv2cleanup $swp1
+}
+
+mldv2inc_is_exclude_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::2")
+ local Y=("2001:db8:1::20" "2001:db8:1::21")
+
+ mldv2exclude_prepare $h1
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP include -> is_exclude"
+
+ mldv2cleanup $swp1
+}
+
+mldv2inc_to_exclude_test()
+{
+ RET=0
+ local X=("2001:db8:1::1")
+ local Y=("2001:db8:1::20" "2001:db8:1::30")
+
+ mldv2include_prepare $h1
+
+ ip link set dev br0 type bridge mcast_last_member_interval 500
+ check_err $? "Could not change mcast_last_member_interval to 5s"
+
+ $MZ $h1 -c 1 $MZPKT_TO_EXC -q
+ sleep 1
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and .filter_mode == \"exclude\")" &>/dev/null
+ check_err $? "Wrong *,G entry filter mode"
+
+ brmcast_check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::2\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::2 entry still exists"
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::21\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::21 entry still exists"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP include -> to_exclude"
+
+ ip link set dev br0 type bridge mcast_last_member_interval 100
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_allow_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::2" "2001:db8:1::20" "2001:db8:1::30")
+ local Y=("2001:db8:1::21")
+
+ mldv2exclude_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_ALLOW2 -q
+ sleep 1
+ brmcast_check_sg_entries "allow" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP exclude -> allow"
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_is_include_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::2" "2001:db8:1::20" "2001:db8:1::30")
+ local Y=("2001:db8:1::21")
+
+ mldv2exclude_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_IS_INC3 -q
+ sleep 1
+ brmcast_check_sg_entries "is_include" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP exclude -> is_include"
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_is_exclude_test()
+{
+ RET=0
+ local X=("2001:db8:1::30")
+ local Y=("2001:db8:1::20")
+
+ mldv2exclude_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_IS_EXC2 -q
+ sleep 1
+ brmcast_check_sg_entries "is_exclude" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP exclude -> is_exclude"
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_to_exclude_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::30")
+ local Y=("2001:db8:1::20")
+
+ mldv2exclude_prepare $h1
+
+ ip link set dev br0 type bridge mcast_last_member_interval 500
+ check_err $? "Could not change mcast_last_member_interval to 5s"
+
+ $MZ $h1 -c 1 $MZPKT_TO_EXC -q
+ sleep 1
+ brmcast_check_sg_entries "to_exclude" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP exclude -> to_exclude"
+
+ ip link set dev br0 type bridge mcast_last_member_interval 100
+
+ mldv2cleanup $swp1
+}
+
+mldv2inc_block_test()
+{
+ RET=0
+ local X=("2001:db8:1::2" "2001:db8:1::3")
+
+ mldv2include_prepare $h1
+
+ $MZ $h1 -c 1 $MZPKT_BLOCK -q
+ # wait for the sources' timers, which the block report lowers to LMQT
+ # (last member interval * count = 2 seconds by default), to expire
+ sleep 3
+ brmcast_check_sg_entries "block" "${X[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::1\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::1 entry still exists"
+
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 2001:db8:1::100
+
+ log_test "MLDv2 report $TEST_GROUP include -> block"
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_block_test()
+{
+ RET=0
+ local X=("2001:db8:1::1" "2001:db8:1::2" "2001:db8:1::30")
+ local Y=("2001:db8:1::20" "2001:db8:1::21")
+
+ mldv2exclude_prepare $h1
+
+ ip link set dev br0 type bridge mcast_last_member_interval 500
+ check_err $? "Could not change mcast_last_member_interval to 5s"
+
+ $MZ $h1 -c 1 $MZPKT_BLOCK -q
+ sleep 1
+ brmcast_check_sg_entries "block" "${X[@]}" "${Y[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+ brmcast_check_sg_state 1 "${Y[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}" 2001:db8:1::100
+ brmcast_check_sg_fwding 0 "${Y[@]}"
+
+ log_test "MLDv2 report $TEST_GROUP exclude -> block"
+
+ ip link set dev br0 type bridge mcast_last_member_interval 100
+
+ mldv2cleanup $swp1
+}
+
+mldv2exc_timeout_test()
+{
+ RET=0
+ local X=("2001:db8:1::20" "2001:db8:1::30")
+
+ # GMI (group membership interval) should be 3 seconds:
+ # robustness (2) * query interval (1s) + query response interval (1s)
+ ip link set dev br0 type bridge mcast_query_interval 100 mcast_query_response_interval 100
+
+ mldv2exclude_prepare $h1
+ ip link set dev br0 type bridge mcast_query_interval 500 mcast_query_response_interval 500
+ $MZ $h1 -c 1 $MZPKT_ALLOW2 -q
+ sleep 3
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and .filter_mode == \"include\")" &>/dev/null
+ check_err $? "Wrong *,G entry filter mode"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::1\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::1 entry still exists"
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and \
+ .source_list != null and
+ .source_list[].address == \"2001:db8:1::2\")" &>/dev/null
+ check_fail $? "Wrong *,G entry source list, 2001:db8:1::2 entry still exists"
+
+ brmcast_check_sg_entries "allow" "${X[@]}"
+
+ brmcast_check_sg_state 0 "${X[@]}"
+
+ brmcast_check_sg_fwding 1 "${X[@]}"
+ brmcast_check_sg_fwding 0 2001:db8:1::100
+
+ log_test "MLDv2 group $TEST_GROUP exclude timeout"
+
+ ip link set dev br0 type bridge mcast_query_interval 12500 \
+ mcast_query_response_interval 1000
+
+ mldv2cleanup $swp1
+}
+
+mldv2star_ex_auto_add_test()
+{
+ RET=0
+
+ mldv2exclude_prepare $h1
+
+ $MZ $h2 -c 1 $MZPKT_IS_INC -q
+ sleep 1
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .src == \"2001:db8:1::3\" and \
+ .port == \"$swp1\")" &>/dev/null
+ check_err $? "S,G entry for *,G port doesn't exist"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .src == \"2001:db8:1::3\" and \
+ .port == \"$swp1\" and \
+ .flags[] == \"added_by_star_ex\")" &>/dev/null
+ check_err $? "Auto-added S,G entry doesn't have added_by_star_ex flag"
+
+ brmcast_check_sg_fwding 1 2001:db8:1::3
+
+ log_test "MLDv2 S,G port entry automatic add to a *,G port"
+
+ mldv2cleanup $swp1
+ mldv2cleanup $swp2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 927f9ba49e08..98ea37d26c44 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -1270,3 +1270,110 @@ tcpdump_show()
{
tcpdump -e -n -r $capfile 2>&1
}
+
+# return 0 if the packet wasn't seen on host2_if or 1 if it was
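+# Typical call (see brmcast_check_sg_fwding() in this file): inject one UDP
+# packet from $src to $TEST_GROUP on $h2 and report whether it was received
+# on $h1:
+#   mcast_packet_test $TEST_GROUP_MAC $src $TEST_GROUP $h2 $h1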
+mcast_packet_test()
+{
+ local mac=$1
+ local src_ip=$2
+ local ip=$3
+ local host1_if=$4
+ local host2_if=$5
+ local seen=0
+ local tc_proto="ip"
+ local mz_v6arg=""
+
+ # basic check to see if we were passed an IPv4 address; if not, assume IPv6
+ if [[ ! $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+ tc_proto="ipv6"
+ mz_v6arg="-6"
+ fi
+
+ # Add an ACL on `host2_if` which will tell us whether the packet
+ # was received by it or not.
+ tc qdisc add dev $host2_if ingress
+ tc filter add dev $host2_if ingress protocol $tc_proto pref 1 handle 101 \
+ flower ip_proto udp dst_mac $mac action drop
+
+ $MZ $host1_if $mz_v6arg -c 1 -p 64 -b $mac -A $src_ip -B $ip -t udp "dp=4096,sp=2048" -q
+ sleep 1
+
+ tc -j -s filter show dev $host2_if ingress \
+ | jq -e ".[] | select(.options.handle == 101) \
+ | select(.options.actions[0].stats.packets == 1)" &> /dev/null
+ if [[ $? -eq 0 ]]; then
+ seen=1
+ fi
+
+ tc filter del dev $host2_if ingress protocol $tc_proto pref 1 handle 101 flower
+ tc qdisc del dev $host2_if ingress
+
+ return $seen
+}
+
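+# Verify that the *,G entry for $TEST_GROUP lists every source in "$@" in its
+# source list and that a matching S,G mdb entry exists for each of them.
+# $1 names the report type and is used only in the error messages.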
+brmcast_check_sg_entries()
+{
+ local report=$1; shift
+ local slist=("$@")
+ local sarg=""
+
+ for src in "${slist[@]}"; do
+ sarg="${sarg} and .source_list[].address == \"$src\""
+ done
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .source_list != null $sarg)" &>/dev/null
+ check_err $? "Wrong *,G entry source list after $report report"
+
+ for sgent in "${slist[@]}"; do
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .src == \"$sgent\")" &>/dev/null
+ check_err $? "Missing S,G entry ($sgent, $TEST_GROUP)"
+ done
+}
+
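+# For every source in "$@", inject a packet from that source to $TEST_GROUP
+# on $h2 and check that it is ($1 == 1) or is not ($1 == 0) forwarded to $h1.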
+brmcast_check_sg_fwding()
+{
+ local should_fwd=$1; shift
+ local sources=("$@")
+
+ for src in "${sources[@]}"; do
+ local retval=0
+
+ mcast_packet_test $TEST_GROUP_MAC $src $TEST_GROUP $h2 $h1
+ retval=$?
+ if [ $should_fwd -eq 1 ]; then
+ check_fail $retval "Didn't forward traffic from S,G ($src, $TEST_GROUP)"
+ else
+ check_err $retval "Forwarded traffic for blocked S,G ($src, $TEST_GROUP)"
+ fi
+ done
+}
+
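+# Check the state of the S,G entries for the sources in "$@": when $1 is 1
+# they are expected to be blocked (zero source timer, "blocked" flag set);
+# when $1 is 0 they are expected to be active (running timer, no flag).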
+brmcast_check_sg_state()
+{
+ local is_blocked=$1; shift
+ local sources=("$@")
+ local should_fail=1
+
+ if [ $is_blocked -eq 1 ]; then
+ should_fail=0
+ fi
+
+ for src in "${sources[@]}"; do
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .source_list != null) |
+ .source_list[] |
+ select(.address == \"$src\") |
+ select(.timer == \"0.00\")" &>/dev/null
+ check_err_fail $should_fail $? "Entry $src has zero timer"
+
+ bridge -j -d -s mdb show dev br0 \
+ | jq -e ".[].mdb[] | \
+ select(.grp == \"$TEST_GROUP\" and .src == \"$src\" and \
+ .flags[] == \"blocked\")" &>/dev/null
+ check_err_fail $should_fail $? "Entry $src has blocked flag"
+ done
+}
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 741a1c4f4ae8..0faaccd21447 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -5,3 +5,13 @@ CONFIG_INET_DIAG=m
CONFIG_INET_MPTCP_DIAG=m
CONFIG_VETH=y
CONFIG_NET_SCH_NETEM=m
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NF_TABLES_IPV6=y
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 08f53d86dedc..0d93b243695f 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -13,6 +13,24 @@ capture=0
TEST_COUNT=0
+# generated using "nfbpf_compile '(ip && (ip[54] & 0xf0) == 0x30) ||
+# (ip6 && (ip6[74] & 0xf0) == 0x30)'"
+CBPF_MPTCP_SUBOPTION_ADD_ADDR="14,
+ 48 0 0 0,
+ 84 0 0 240,
+ 21 0 3 64,
+ 48 0 0 54,
+ 84 0 0 240,
+ 21 6 7 48,
+ 48 0 0 0,
+ 84 0 0 240,
+ 21 0 4 96,
+ 48 0 0 74,
+ 84 0 0 240,
+ 21 0 1 48,
+ 6 0 0 65535,
+ 6 0 0 0"
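+# The bytecode above is in the xt_bpf --bytecode format: the first field is
+# the instruction count (14), followed by one "opcode jt jf k" tuple per
+# instruction. Per the expression in the comment, it is meant to match
+# packets carrying an MPTCP ADD_ADDR suboption (subtype 0x3).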
+
init()
{
capout=$(mktemp)
@@ -82,6 +100,26 @@ reset_with_cookies()
done
}
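+# Like reset(), but additionally lower ns1's ADD_ADDR retransmission timeout
+# to 1 second and add a netfilter rule in ns2 that drops outgoing TCP packets
+# carrying an MPTCP (option 30) ADD_ADDR suboption, so the "ADD_ADDR timeout"
+# test below exercises the ADD_ADDR retransmission path.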
+reset_with_add_addr_timeout()
+{
+ local ip="${1:-4}"
+ local tables
+
+ tables="iptables"
+ if [ $ip -eq 6 ]; then
+ tables="ip6tables"
+ fi
+
+ reset
+
+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+ ip netns exec $ns2 $tables -A OUTPUT -p tcp \
+ -m tcp --tcp-option 30 \
+ -m bpf --bytecode \
+ "$CBPF_MPTCP_SUBOPTION_ADD_ADDR" \
+ -j DROP
+}
+
for arg in "$@"; do
if [ "$arg" = "-c" ]; then
capture=1
@@ -94,6 +132,17 @@ if [ $? -ne 0 ];then
exit $ksft_skip
fi
+iptables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run all tests without iptables tool"
+ exit $ksft_skip
+fi
+
+ip6tables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run all tests without ip6tables tool"
+ exit $ksft_skip
+fi
check_transfer()
{
@@ -135,6 +184,7 @@ do_transfer()
connect_addr="$5"
rm_nr_ns1="$6"
rm_nr_ns2="$7"
+ speed="$8"
port=$((10000+$TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
@@ -159,7 +209,7 @@ do_transfer()
sleep 1
fi
- if [[ $rm_nr_ns1 -eq 0 && $rm_nr_ns2 -eq 0 ]]; then
+ if [ $speed = "fast" ]; then
mptcp_connect="./mptcp_connect -j"
else
mptcp_connect="./mptcp_connect -r"
@@ -250,26 +300,13 @@ run_tests()
listener_ns="$1"
connector_ns="$2"
connect_addr="$3"
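+ # Optional arguments: rm_nr_ns1/rm_nr_ns2 are passed through to
+ # do_transfer() (how many addresses/subflows each side removes during the
+ # transfer); speed selects the mptcp_connect mode there ("fast" uses -j,
+ # anything else -r).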
+ rm_nr_ns1="${4:-0}"
+ rm_nr_ns2="${5:-0}"
+ speed="${6:-fast}"
lret=0
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} 0 0
- lret=$?
- if [ $lret -ne 0 ]; then
- ret=$lret
- return
- fi
-}
-
-run_remove_tests()
-{
- listener_ns="$1"
- connector_ns="$2"
- connect_addr="$3"
- rm_nr_ns1="$4"
- rm_nr_ns2="$5"
- lret=0
-
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} ${rm_nr_ns1} ${rm_nr_ns2}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} \
+ ${rm_nr_ns1} ${rm_nr_ns2} ${speed}
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -491,12 +528,21 @@ run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows and signal" 3 3 3
chk_add_nr 1 1
+# add_addr timeout
+reset_with_add_addr_timeout
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+run_tests $ns1 $ns2 10.0.1.1 0 0 slow
+chk_join_nr "signal address, ADD_ADDR timeout" 1 1 1
+chk_add_nr 4 0
+
# single subflow, remove
reset
ip netns exec $ns1 ./pm_nl_ctl limits 0 1
ip netns exec $ns2 ./pm_nl_ctl limits 0 1
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
-run_remove_tests $ns1 $ns2 10.0.1.1 0 1
+run_tests $ns1 $ns2 10.0.1.1 0 1 slow
chk_join_nr "remove single subflow" 1 1 1
chk_rm_nr 1 1
@@ -506,7 +552,7 @@ ip netns exec $ns1 ./pm_nl_ctl limits 0 2
ip netns exec $ns2 ./pm_nl_ctl limits 0 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
-run_remove_tests $ns1 $ns2 10.0.1.1 0 2
+run_tests $ns1 $ns2 10.0.1.1 0 2 slow
chk_join_nr "remove multiple subflows" 2 2 2
chk_rm_nr 2 2
@@ -515,7 +561,7 @@ reset
ip netns exec $ns1 ./pm_nl_ctl limits 0 1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
ip netns exec $ns2 ./pm_nl_ctl limits 1 1
-run_remove_tests $ns1 $ns2 10.0.1.1 1 0
+run_tests $ns1 $ns2 10.0.1.1 1 0 slow
chk_join_nr "remove single address" 1 1 1
chk_add_nr 1 1
chk_rm_nr 0 0
@@ -526,7 +572,7 @@ ip netns exec $ns1 ./pm_nl_ctl limits 0 2
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
ip netns exec $ns2 ./pm_nl_ctl limits 1 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
-run_remove_tests $ns1 $ns2 10.0.1.1 1 1
+run_tests $ns1 $ns2 10.0.1.1 1 1 slow
chk_join_nr "remove subflow and signal" 2 2 2
chk_add_nr 1 1
chk_rm_nr 1 1
@@ -538,7 +584,7 @@ ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
ip netns exec $ns2 ./pm_nl_ctl limits 1 3
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
-run_remove_tests $ns1 $ns2 10.0.1.1 1 2
+run_tests $ns1 $ns2 10.0.1.1 1 2 slow
chk_join_nr "remove subflows and signal" 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2