23 files changed, 537 insertions, 97 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio new file mode 100644 index 000000000000..da86efc7781b --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-mdio @@ -0,0 +1,63 @@ +What: /sys/bus/mdio_bus/devices/.../statistics/ +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + This folder contains statistics about global and per + MDIO bus address statistics. + +What: /sys/bus/mdio_bus/devices/.../statistics/transfers +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of transfers for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/errors +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of transfer errors for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/writes +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of write transactions for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/reads +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of read transactions for this MDIO bus. + +What: /sys/bus/mdio_bus/devices/.../statistics/transfers_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of transfers for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/errors_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of transfer errors for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/writes_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of write transactions for this MDIO bus address. + +What: /sys/bus/mdio_bus/devices/.../statistics/reads_<addr> +Date: January 2020 +KernelVersion: 5.6 +Contact: netdev@vger.kernel.org +Description: + Total number of read transactions for this MDIO bus address. diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index ccba9769c651..5f9bb0a0616a 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -40,7 +40,7 @@ The ``mlxsw`` driver supports reloading via ``DEVLINK_CMD_RELOAD`` Info versions ============= -The ``mlx5`` driver reports the following versions +The ``mlxsw`` driver reports the following versions .. 
list-table:: devlink info versions implemented :widths: 5 5 90 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e65ed4d0a7ad..dee79588d2b1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -549,6 +549,7 @@ source "drivers/net/hyperv/Kconfig" config NETDEVSIM tristate "Simulated networking device" depends on DEBUG_FS + depends on INET depends on IPV6 || IPV6=n select NET_DEVLINK help diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index feccb6201660..269cc6953d47 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -323,7 +323,7 @@ static int felix_parse_ports_node(struct felix *felix, struct device *dev = felix->ocelot.dev; struct device_node *child; - for_each_child_of_node(ports_node, child) { + for_each_available_child_of_node(ports_node, child) { phy_interface_t phy_mode; u32 port; int err; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 03482616faa7..2c812b481778 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -645,6 +645,27 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, const struct phylink_link_state *state) { if (link_an_mode == MLO_AN_INBAND) { + int bmsr, bmcr; + + /* Some PHYs like VSC8234 don't like it when AN restarts on + * their system side and they restart line side AN too, going + * into an endless link up/down loop. Don't restart PCS AN if + * link is up already. + * We do check that AN is enabled just in case this is the 1st + * call, PCS detects a carrier but AN is disabled from power on + * or by boot loader. + */ + bmcr = phy_read(pcs, MII_BMCR); + if (bmcr < 0) + return; + + bmsr = phy_read(pcs, MII_BMSR); + if (bmsr < 0) + return; + + if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS)) + return; + /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001 * for the MAC PCS in order to acknowledge the AN. 
*/ @@ -892,7 +913,6 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs, break; } - pcs->link = USXGMII_LPA_LNKS(lpa); if (USXGMII_LPA_DUPLEX(lpa)) pcs->duplex = DUPLEX_FULL; else diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 00382b7c5bd8..0c6bf3a55a9a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -36,7 +36,6 @@ void enetc_sched_speed_set(struct net_device *ndev) case SPEED_10: default: pspeed = ENETC_PMR_PSPEED_10M; - netdev_err(ndev, "Qbv PSPEED set speed link down.\n"); } priv->speed = speed; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index ba2566e2123d..0637ccadee79 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -696,21 +696,24 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) /** * fm10k_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure + * @txqueue: the index of the Tx queue that timed out **/ static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *tx_ring; bool real_tx_hang = false; - int i; - -#define TX_TIMEO_LIMIT 16000 - for (i = 0; i < interface->num_tx_queues; i++) { - struct fm10k_ring *tx_ring = interface->tx_ring[i]; - if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) - real_tx_hang = true; + if (txqueue >= interface->num_tx_queues) { + WARN(1, "invalid Tx queue index %d", txqueue); + return; } + tx_ring = interface->tx_ring[txqueue]; + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) + real_tx_hang = true; + +#define TX_TIMEO_LIMIT 16000 if (real_tx_hang) { fm10k_tx_timeout_reset(interface); } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 33912cf964eb..8c3e753bfb9d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -307,37 +307,18 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_ring *tx_ring = NULL; - unsigned int i, hung_queue = 0; + unsigned int i; u32 head, val; pf->tx_timeout_count++; - /* find the stopped queue the same way the stack does */ - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *q; - unsigned long trans_start; - - q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - (trans_start + netdev->watchdog_timeo))) { - hung_queue = i; - break; - } - } - - if (i == netdev->num_tx_queues) { - netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); - } else { - /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_queue_pairs; i++) { - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { - if (hung_queue == - vsi->tx_rings[i]->queue_index) { - tx_ring = vsi->tx_rings[i]; - break; - } + /* with txqueue index, find the tx_ring struct */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (txqueue == + vsi->tx_rings[i]->queue_index) { + tx_ring = vsi->tx_rings[i]; + break; } } } @@ -363,14 +344,14 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) 
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", - vsi->seid, hung_queue, tx_ring->next_to_clean, + vsi->seid, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail), val); } pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", - pf->tx_timeout_recovery_level, hung_queue); + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", + pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { case 1: diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index bf539483e25e..eb9d00608e9a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5086,36 +5086,17 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_ring *tx_ring = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - int hung_queue = -1; u32 i; pf->tx_timeout_count++; - /* find the stopped queue the same way dev_watchdog() does */ - for (i = 0; i < netdev->num_tx_queues; i++) { - unsigned long trans_start; - struct netdev_queue *q; - - q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - trans_start + netdev->watchdog_timeo)) { - hung_queue = i; - break; - } - } - - if (i == netdev->num_tx_queues) - netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); - else - /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_txq; i++) - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) - if (hung_queue == vsi->tx_rings[i]->q_index) { - tx_ring = vsi->tx_rings[i]; - break; - } + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + if (txqueue == vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } /* Reset recovery level if enough time has elapsed after last timeout. * Also ensure no new reset action happens before next timeout period. 
@@ -5130,19 +5111,19 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct ice_hw *hw = &pf->hw; u32 head, val = 0; - head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & + head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", - vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + vsi->vsi_num, txqueue, tx_ring->next_to_clean, head, tx_ring->next_to_use, val); } pf->tx_timeout_last_recovery = jiffies; - netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", - pf->tx_timeout_recovery_level, hung_queue); + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", + pf->tx_timeout_recovery_level, txqueue); switch (pf->tx_timeout_recovery_level) { case 1: diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 0e401f116d54..149dca0012ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -1020,8 +1020,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) s16 ntc = xdp_ring->next_to_clean; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; - bool xmit_done = true; u32 xsk_frames = 0; + bool xmit_done; tx_desc = ICE_TX_DESC(xdp_ring, ntc); tx_buf = &xdp_ring->tx_buf[ntc]; diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index db289bcce21d..5a506440560a 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -212,6 +212,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) case IGC_DEV_ID_I225_I: case IGC_DEV_ID_I220_V: case IGC_DEV_ID_I225_K: + case IGC_DEV_ID_I225_BLANK_NVM: mac->type = igc_i225; break; default: diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 2121fc34e300..58efa7a02c68 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -282,6 +282,10 @@ #define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ +/* IPSec Encrypt Enable */ +#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ @@ -460,6 +464,7 @@ /* PHY Status Register */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define IGC_PHY_RST_COMP 0x0100 /* Internal PHY reset completion */ /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 20f710645746..90ac0e0144d8 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -21,8 +21,7 @@ #define IGC_DEV_ID_I225_I 0x15F8 #define IGC_DEV_ID_I220_V 0x15F7 #define IGC_DEV_ID_I225_K 0x3100 - -#define IGC_FUNC_0 0 +#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD /* Function pointers for the MAC. 
*/ struct igc_mac_operations { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index c359f3d9fb25..d9d5425fe8d9 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, /* required last entry */ {0, } }; @@ -880,7 +881,7 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; - /* For 82575, context index must be unique per ring. */ + /* For i225, context index must be unique per ring. */ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) mss_l4len_idx |= tx_ring->reg_idx << 4; @@ -999,6 +1000,10 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DCMD_IFCS; + /* set segmentation bits for TSO */ + cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, + (IGC_ADVTXD_DCMD_TSE)); + /* set timestamp bit if present */ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, (IGC_ADVTXD_MAC_TSTAMP)); @@ -1170,6 +1175,100 @@ dma_error: return -1; } +static int igc_tso(struct igc_ring *tx_ring, + struct igc_tx_buffer *first, + u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); + type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM | + IGC_TX_FLAGS_IPV4; + } else { + ip.v6->payload_len = 0; + first->tx_flags |= IGC_TX_FLAGS_TSO | + IGC_TX_FLAGS_CSUM; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + } else { + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; 
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; + + igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { @@ -1179,6 +1278,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, u32 tx_flags = 0; unsigned short f; u8 hdr_len = 0; + int tso = 0; /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, @@ -1225,11 +1325,21 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = protocol; - igc_tx_csum(tx_ring, first); + tso = igc_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igc_tx_csum(tx_ring, first); igc_tx_map(tx_ring, first, hdr_len); return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; } static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, @@ -4588,6 +4698,8 @@ static int igc_probe(struct pci_dev *pdev, /* Add supported features to the features list*/ netdev->features |= NETIF_F_SG; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SCTP_CRC; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index f4b05af0dd2f..8e1799508edc 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -173,6 +173,7 @@ s32 igc_check_downshift(struct igc_hw *hw) s32 igc_phy_hw_reset(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; + u32 phpm = 0, timeout = 10000; s32 ret_val; u32 ctrl; @@ -186,6 +187,8 @@ s32 igc_phy_hw_reset(struct igc_hw *hw) if (ret_val) goto out; + phpm = rd32(IGC_I225_PHPM); + ctrl = rd32(IGC_CTRL); wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST); wrfl(); @@ -195,7 +198,18 @@ s32 igc_phy_hw_reset(struct igc_hw *hw) wr32(IGC_CTRL, ctrl); wrfl(); - usleep_range(1500, 2000); + /* SW should guarantee 100us for the completion of the PHY reset */ + usleep_range(100, 150); + do { + phpm = rd32(IGC_I225_PHPM); + timeout--; + udelay(1); + } while (!(phpm & IGC_PHY_RST_COMP) && timeout); + + if (!timeout) + hw_dbg("Timeout is expired after a phy reset\n"); + + usleep_range(100, 150); phy->ops.release(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index c82111051898..c9029b549b90 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -12,6 +12,7 @@ #define IGC_MDIC 0x00020 /* MDI Control - RW */ #define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */ #define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ /* Internal Packet Buffer Size Registers */ #define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 8f56289fc2ec..f32d56ac3e80 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -467,7 +467,7 @@ nsim_fib6_rt_create(struct nsim_fib_data *data, fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC); if (!fib6_rt) - return 
NULL; + return ERR_PTR(-ENOMEM); nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr, sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6, @@ -650,8 +650,8 @@ static int nsim_fib6_rt_insert(struct nsim_fib_data *data, int err; fib6_rt = nsim_fib6_rt_create(data, fen6_info); - if (!fib6_rt) - return -ENOMEM; + if (IS_ERR(fib6_rt)) + return PTR_ERR(fib6_rt); fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt); if (!fib6_rt_old) diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index cf5a391c93e6..1dca3e883df4 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -145,7 +145,7 @@ struct adin_clause45_mmd_map { u16 adin_regnum; }; -static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = { +static const struct adin_clause45_mmd_map adin_clause45_mmd_map[] = { { MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG }, { MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG }, { MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG }, @@ -159,7 +159,7 @@ struct adin_hw_stat { u16 reg2; }; -static struct adin_hw_stat adin_hw_stats[] = { +static const struct adin_hw_stat adin_hw_stats[] = { { "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */ { "length_error_frames_count", 0x940C }, { "alignment_error_frames_count", 0x940D }, @@ -456,7 +456,7 @@ static int adin_phy_config_intr(struct phy_device *phydev) static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad, u16 cl45_regnum) { - struct adin_clause45_mmd_map *m; + const struct adin_clause45_mmd_map *m; int i; if (devad == MDIO_MMD_VEND1) @@ -650,7 +650,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data) } static int adin_read_mmd_stat_regs(struct phy_device *phydev, - struct adin_hw_stat *stat, + const struct adin_hw_stat *stat, u32 *val) { int ret; @@ -676,7 +676,7 @@ static int adin_read_mmd_stat_regs(struct phy_device *phydev, static u64 adin_get_stat(struct phy_device *phydev, int i) { - struct adin_hw_stat *stat = &adin_hw_stats[i]; + const struct adin_hw_stat *stat = &adin_hw_stats[i]; struct adin_priv *priv = phydev->priv; u32 val; int ret; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 8d753bb07227..9bb9f37f21dc 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -158,9 +158,11 @@ struct mii_bus *mdiobus_alloc_size(size_t size) if (size) bus->priv = (void *)bus + aligned_size; - /* Initialise the interrupts to polling */ - for (i = 0; i < PHY_MAX_ADDR; i++) + /* Initialise the interrupts to polling and 64-bit seqcounts */ + for (i = 0; i < PHY_MAX_ADDR; i++) { bus->irq[i] = PHY_POLL; + u64_stats_init(&bus->stats[i].syncp); + } return bus; } @@ -249,9 +251,215 @@ static void mdiobus_release(struct device *d) kfree(bus); } +struct mdio_bus_stat_attr { + int addr; + unsigned int field_offset; +}; + +static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset) +{ + const char *p = (const char *)s + offset; + unsigned int start; + u64 val = 0; + + do { + start = u64_stats_fetch_begin(&s->syncp); + val = u64_stats_read((const u64_stats_t *)p); + } while (u64_stats_fetch_retry(&s->syncp, start)); + + return val; +} + +static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset) +{ + unsigned int i; + u64 val = 0; + + for (i = 0; i < PHY_MAX_ADDR; i++) + val += mdio_bus_get_stat(&bus->stats[i], offset); + + return val; +} + +static ssize_t mdio_bus_stat_field_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mii_bus *bus = 
to_mii_bus(dev); + struct mdio_bus_stat_attr *sattr; + struct dev_ext_attribute *eattr; + u64 val; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + sattr = eattr->var; + + if (sattr->addr < 0) + val = mdio_bus_get_global_stat(bus, sattr->field_offset); + else + val = mdio_bus_get_stat(&bus->stats[sattr->addr], + sattr->field_offset); + + return sprintf(buf, "%llu\n", val); +} + +static ssize_t mdio_bus_device_stat_field_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mdio_device *mdiodev = to_mdio_device(dev); + struct mii_bus *bus = mdiodev->bus; + struct mdio_bus_stat_attr *sattr; + struct dev_ext_attribute *eattr; + int addr = mdiodev->addr; + u64 val; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + sattr = eattr->var; + + val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset); + + return sprintf(buf, "%llu\n", val); +} + +#define MDIO_BUS_STATS_ATTR_DECL(field, file) \ +static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + -1, offsetof(struct mdio_bus_stats, field) \ + }), \ +}; \ +static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_device_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + -1, offsetof(struct mdio_bus_stats, field) \ + }), \ +}; + +#define MDIO_BUS_STATS_ATTR(field) \ + MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field)) + +MDIO_BUS_STATS_ATTR(transfers); +MDIO_BUS_STATS_ATTR(errors); +MDIO_BUS_STATS_ATTR(writes); +MDIO_BUS_STATS_ATTR(reads); + +#define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \ +static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \ + .attr = { .attr = { .name = file, .mode = 0444 }, \ + .show = mdio_bus_stat_field_show, \ + }, \ + .var = &((struct mdio_bus_stat_attr) { \ + addr, offsetof(struct mdio_bus_stats, field) \ + }), \ +} + +#define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \ + MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \ + __stringify(field) "_" __stringify(addr)) + +#define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \ + MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \ + MDIO_BUS_STATS_ADDR_ATTR(reads, addr) \ + +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25); 
+MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30); +MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31); + +#define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \ + &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \ + &dev_attr_mdio_bus_addr_reads_##addr.attr.attr \ + +static struct attribute *mdio_bus_statistics_attrs[] = { + &dev_attr_mdio_bus_transfers.attr.attr, + &dev_attr_mdio_bus_errors.attr.attr, + &dev_attr_mdio_bus_writes.attr.attr, + &dev_attr_mdio_bus_reads.attr.attr, + MDIO_BUS_STATS_ADDR_ATTR_GROUP(0), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(1), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(2), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(3), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(4), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(5), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(6), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(7), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(8), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(9), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(10), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(11), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(12), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(13), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(14), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(15), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(16), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(17), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(18), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(19), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(20), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(21), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(22), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(23), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(24), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(25), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(26), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(27), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(28), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(29), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(30), + MDIO_BUS_STATS_ADDR_ATTR_GROUP(31), + NULL, +}; + +static const struct attribute_group mdio_bus_statistics_group = { + .name = "statistics", + .attrs = mdio_bus_statistics_attrs, +}; + +static const struct attribute_group *mdio_bus_groups[] = { + &mdio_bus_statistics_group, + NULL, +}; + static struct class mdio_bus_class = { .name = "mdio_bus", .dev_release = mdiobus_release, + .dev_groups = mdio_bus_groups, }; #if IS_ENABLED(CONFIG_OF_MDIO) @@ -530,6 +738,24 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) } EXPORT_SYMBOL(mdiobus_scan); +static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret) +{ + u64_stats_update_begin(&stats->syncp); + + u64_stats_inc(&stats->transfers); + if (ret < 0) { + u64_stats_inc(&stats->errors); + goto out; + } + + if (op) + u64_stats_inc(&stats->reads); + else + u64_stats_inc(&stats->writes); +out: + u64_stats_update_end(&stats->syncp); +} + /** * __mdiobus_read - Unlocked version of the mdiobus_read function * @bus: the mii_bus struct @@ -549,6 +775,7 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) retval = bus->read(bus, addr, regnum); trace_mdio_access(bus, 1, addr, regnum, retval, retval); + mdiobus_stats_acct(&bus->stats[addr], true, retval); return retval; } @@ -574,6 +801,7 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) err = bus->write(bus, addr, regnum, val); trace_mdio_access(bus, 0, addr, regnum, val, err); + mdiobus_stats_acct(&bus->stats[addr], false, err); return err; } @@ -719,8 +947,27 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static struct attribute 
*mdio_bus_device_statistics_attrs[] = { + &dev_attr_mdio_bus_device_transfers.attr.attr, + &dev_attr_mdio_bus_device_errors.attr.attr, + &dev_attr_mdio_bus_device_writes.attr.attr, + &dev_attr_mdio_bus_device_reads.attr.attr, + NULL, +}; + +static const struct attribute_group mdio_bus_device_statistics_group = { + .name = "statistics", + .attrs = mdio_bus_device_statistics_attrs, +}; + +static const struct attribute_group *mdio_bus_dev_groups[] = { + &mdio_bus_device_statistics_group, + NULL, +}; + struct bus_type mdio_bus_type = { .name = "mdio_bus", + .dev_groups = mdio_bus_dev_groups, .match = mdio_bus_match, .uevent = mdio_uevent, }; diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 10d580c3dea3..6b7532f7c936 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -51,7 +51,8 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag, found = false; oldest = NULL; - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link, + lockdep_is_held(&vif->hash.cache.lock)) { /* Make sure we don't add duplicate entries */ if (entry->len == len && memcmp(entry->tag, tag, len) == 0) @@ -102,7 +103,8 @@ static void xenvif_flush_hash(struct xenvif *vif) spin_lock_irqsave(&vif->hash.cache.lock, flags); - list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) { + list_for_each_entry_rcu(entry, &vif->hash.cache.list, link, + lockdep_is_held(&vif->hash.cache.lock)) { list_del_rcu(&entry->link); vif->hash.cache.count--; kfree_rcu(entry, rcu); diff --git a/include/linux/phy.h b/include/linux/phy.h index 2929d0bc307f..99a87f02667f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -22,6 +22,7 @@ #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> +#include <linux/u64_stats_sync.h> #include <linux/atomic.h> @@ -212,6 +213,15 @@ struct sfp_bus; struct sfp_upstream_ops; struct sk_buff; +struct mdio_bus_stats { + u64_stats_t transfers; + u64_stats_t errors; + u64_stats_t writes; + u64_stats_t reads; + /* Must be last, add new statistics above */ + struct u64_stats_sync syncp; +}; + /* * The Bus class for PHYs. 
Devices which provide access to * PHYs should register using this structure @@ -224,6 +234,7 @@ struct mii_bus { int (*read)(struct mii_bus *bus, int addr, int regnum); int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); int (*reset)(struct mii_bus *bus); + struct mdio_bus_stats stats[PHY_MAX_ADDR]; /* * A lock to ensure that only one thing can read/write diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index b8ceaf0cd997..854d39ef1ca3 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -347,9 +347,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet) #endif int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp); -int peernet2id(struct net *net, struct net *peer); -bool peernet_has_id(struct net *net, struct net *peer); -struct net *get_net_ns_by_id(struct net *net, int id); +int peernet2id(const struct net *net, struct net *peer); +bool peernet_has_id(const struct net *net, struct net *peer); +struct net *get_net_ns_by_id(const struct net *net, int id); struct pernet_operations { struct list_head list; @@ -427,7 +427,7 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header) } #endif -static inline int rt_genid_ipv4(struct net *net) +static inline int rt_genid_ipv4(const struct net *net) { return atomic_read(&net->ipv4.rt_genid); } @@ -459,7 +459,7 @@ static inline void rt_genid_bump_all(struct net *net) rt_genid_bump_ipv6(net); } -static inline int fnhe_genid(struct net *net) +static inline int fnhe_genid(const struct net *net) { return atomic_read(&net->fnhe_genid); } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 6412c1fbfcb5..757cc1d084e7 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -268,7 +268,7 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp) EXPORT_SYMBOL_GPL(peernet2id_alloc); /* This function returns, if assigned, the id of a peer netns. */ -int peernet2id(struct net *net, struct net *peer) +int peernet2id(const struct net *net, struct net *peer) { int id; @@ -283,12 +283,12 @@ EXPORT_SYMBOL(peernet2id); /* This function returns true is the peer netns has an id assigned into the * current netns. */ -bool peernet_has_id(struct net *net, struct net *peer) +bool peernet_has_id(const struct net *net, struct net *peer) { return peernet2id(net, peer) >= 0; } -struct net *get_net_ns_by_id(struct net *net, int id) +struct net *get_net_ns_by_id(const struct net *net, int id) { struct net *peer; |
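As a quick illustration of the statistics ABI added in Documentation/ABI/testing/sysfs-bus-mdio at the top of this series: the counters are plain read-only sysfs files, one set per bus plus one set per bus address. The sketch below is only an assumed usage example, not part of the series; in particular the bus name "fixed-0" is a placeholder for whatever `ls /sys/bus/mdio_bus/devices` reports on the target system.

/*
 * Minimal userspace sketch: dump the global and per-address MDIO bus
 * counters documented in sysfs-bus-mdio.  "fixed-0" is a hypothetical
 * bus name used purely for illustration.
 */
#include <stdio.h>

static const char *bus = "fixed-0";	/* placeholder bus name */

static unsigned long long read_stat(const char *name)
{
	char path[256];
	unsigned long long val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/mdio_bus/devices/%s/statistics/%s", bus, name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 0;
	}
	if (fscanf(f, "%llu", &val) != 1)
		fprintf(stderr, "failed to parse %s\n", path);
	fclose(f);
	return val;
}

int main(void)
{
	/* Global counters for the whole bus... */
	printf("transfers: %llu\n", read_stat("transfers"));
	printf("errors:    %llu\n", read_stat("errors"));
	printf("reads:     %llu\n", read_stat("reads"));
	printf("writes:    %llu\n", read_stat("writes"));

	/* ...and the per-address variant, here for PHY address 0. */
	printf("reads_0:   %llu\n", read_stat("reads_0"));

	return 0;
}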
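The fm10k, i40e and ice hunks above all follow the same pattern: their "find the hung queue" scans are dropped because .ndo_tx_timeout now receives the index of the timed-out queue directly. A minimal sketch of that pattern in a hypothetical driver is shown below; the foo_* names are illustrative only and do not come from this series.

#include <linux/netdevice.h>

struct foo_priv {
	unsigned int num_tx_queues;
};

/* The stack hands over the index of the queue whose watchdog expired,
 * so the driver can go straight to the affected ring instead of
 * re-scanning every queue as the removed code in this diff did.
 */
static void foo_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct foo_priv *priv = netdev_priv(netdev);

	/* Defensive bounds check, mirroring the fm10k change above. */
	if (txqueue >= priv->num_tx_queues) {
		WARN(1, "invalid Tx queue index %u", txqueue);
		return;
	}

	netdev_warn(netdev, "Tx timeout on queue %u, scheduling reset\n",
		    txqueue);
	/* ...kick the driver's reset/recovery path for that ring... */
}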