Diffstat (limited to 'drivers/net/ethernet')
95 files changed, 466 insertions(+), 509 deletions(-)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 47e5984f16fb..3155f7fa83eb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -613,7 +613,6 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) napi_schedule(&greth->napi); } - mmiowb(); spin_unlock(&greth->devlock); return retval; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 16477aa6d61f..4f7e792e50e9 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -345,8 +345,6 @@ static void slic_set_rx_mode(struct net_device *dev) if (sdev->promisc != set_promisc) { sdev->promisc = set_promisc; slic_configure_rcv(sdev); - /* make sure writes to receiver cant leak out of the lock */ - mmiowb(); } spin_unlock_bh(&sdev->link_lock); } @@ -1461,8 +1459,6 @@ static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev) if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS) netif_stop_queue(dev); - /* make sure writes to io-memory cant leak out of tx queue lock */ - mmiowb(); return NETDEV_TX_OK; drop_skb: diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index b17d435de09f..05798aa5bb73 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -2016,7 +2016,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) mb(); writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); - mmiowb(); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 9e07b469066a..f35c9a75be50 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter) adapter->soft_stats.scc += smb->tx_1_col; adapter->soft_stats.mcc += smb->tx_2_col; adapter->soft_stats.latecol += smb->tx_late_col; - adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_underrun += smb->tx_underrun; adapter->soft_stats.tx_trunc += smb->tx_trunc; adapter->soft_stats.tx_pause += smb->tx_pause; @@ -2439,7 +2439,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, atl1_tx_map(adapter, skb, ptpd); atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); - mmiowb(); return NETDEV_TX_OK; } @@ -3179,7 +3178,7 @@ static struct atl1_stats atl1_gstrings_stats[] = { {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, - {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)}, {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h index 34a58cd846a0..eacff19ea05b 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ -681,7 +681,7 @@ struct atl1_sft_stats { u64 scc; /* packets TX after a single collision */ u64 mcc; /* packets TX after multiple collisions */ u64 latecol; /* TX packets w/ late collisions */ - u64 tx_underun; /* TX packets aborted due to TX FIFO underrun + u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun * or TRD FIFO 
underrun */ u64 tx_trunc; /* TX packets truncated due to size > MTU */ u64 rx_pause; /* num Pause packets received. */ diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index d99317b3d891..dd81c5863111 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter) netdev->stats.tx_aborted_errors++; if (txs->late_col) netdev->stats.tx_window_errors++; - if (txs->underun) + if (txs->underrun) netdev->stats.tx_fifo_errors++; } while (1); @@ -908,7 +908,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX, (adapter->txd_write_ptr >> 2)); - mmiowb(); dev_consume_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h index c64a6bdfa7ae..25ec84cb4853 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.h +++ b/drivers/net/ethernet/atheros/atlx/atl2.h @@ -260,7 +260,7 @@ struct tx_pkt_status { unsigned multi_col:1; unsigned late_col:1; unsigned abort_col:1; - unsigned underun:1; /* current packet is aborted + unsigned underrun:1; /* current packet is aborted * due to txram underrun */ unsigned:3; /* reserved */ unsigned update:1; /* always 1'b1 in tx_status_buf */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d63371d70bce..dfdd14eadd57 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3305,8 +3305,6 @@ next_rx: BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); - mmiowb(); - return rx_pkt; } @@ -6723,8 +6721,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) BNX2_WR16(bp, txr->tx_bidx_addr, prod); BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); - mmiowb(); - txr->tx_prod = prod; if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ecb1bd7eb508..0c8f5b546c6f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4166,8 +4166,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); - mmiowb(); - txdata->tx_bd_prod += nbd; if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 2462e7aa0c5d..2d57af9c061c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -527,8 +527,6 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4, ((u32 *)&rx_prods)[i]); - mmiowb(); /* keep prod updates ordered */ - DP(NETIF_MSG_RX_STATUS, "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", fp->index, bd_prod, rx_comp_prod, rx_sge_prod); @@ -653,7 +651,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); /* Make sure that ACK is written */ - mmiowb(); barrier(); } @@ -674,7 +671,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); /* Make sure that ACK is written */ - mmiowb(); barrier(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 749d0ef44371..0745cccd416d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2623,7 +2623,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) wmb(); DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); - mmiowb(); barrier(); num_pkts++; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 626b491f7674..3716c828ff5d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -869,9 +869,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) "write %x to HC %d (addr 0x%x)\n", val, port, addr); - /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) BNX2X_ERR("BUG! Proper val not read from IGU!\n"); @@ -887,9 +884,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); - /* flush all outstanding writes */ - mmiowb(); - REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) BNX2X_ERR("BUG! Proper val not read from IGU!\n"); @@ -1595,7 +1589,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) /* * Ensure that HC_CONFIG is written before leading/trailing edge config */ - mmiowb(); barrier(); if (!CHIP_IS_E1(bp)) { @@ -1611,9 +1604,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); } - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); } static void bnx2x_igu_int_enable(struct bnx2x *bp) @@ -1674,9 +1664,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); - - /* Make sure that interrupts are indeed enabled from here on */ - mmiowb(); } void bnx2x_int_enable(struct bnx2x *bp) @@ -3833,7 +3820,6 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp) REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bp->spq_prod_idx); - mmiowb(); } /** @@ -5244,7 +5230,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); - mmiowb(); /* keep prod updates ordered */ } static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, @@ -6513,7 +6498,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp) /* flush all */ mb(); - mmiowb(); } void bnx2x_pre_irq_nic_init(struct bnx2x *bp) @@ -6553,7 +6537,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) /* flush all before enabling interrupts */ mb(); - mmiowb(); bnx2x_int_enable(bp); @@ -7775,12 +7758,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(bp, igu_addr_data, data); - mmiowb(); barrier(); DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); barrier(); /* wait for clean up to finish */ @@ -9550,7 +9531,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", close ? 
"closing" : "opening"); - mmiowb(); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ @@ -9674,7 +9654,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) { REG_WR(bp, PXP2_REG_RD_START_INIT, 0); REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - mmiowb(); } } @@ -9774,16 +9753,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) reset_mask1 & (~not_reset_mask1)); barrier(); - mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); barrier(); - mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - mmiowb(); } /** @@ -9867,9 +9843,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) REG_WR(bp, MISC_REG_UNPREPARED, 0); barrier(); - /* Make sure all is written to the chip before the reset */ - mmiowb(); - /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ @@ -14828,7 +14801,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) if (rc) break; - mmiowb(); barrier(); /* Start accepting on iSCSI L2 ring */ @@ -14863,7 +14835,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) if (!bnx2x_wait_sp_comp(bp, sp_bits)) BNX2X_ERR("rx_mode completion timed out!\n"); - mmiowb(); barrier(); /* Unset iSCSI L2 MAC */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 7b22a6d8514c..80d250a6d048 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -5039,7 +5039,6 @@ static inline int bnx2x_q_init(struct bnx2x *bp, /* As no ramrod is sent, complete the command immediately */ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); - mmiowb(); smp_mb(); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index c97b642e6537..0edbb0a76847 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -100,13 +100,11 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", cmd_data.sb_id_and_flags, igu_addr_data); REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); - mmiowb(); barrier(); DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(bp, igu_addr_ctl, ctl); - mmiowb(); barrier(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a9bdc21873d3..0752b7fa4d9c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -172,8 +172,6 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) /* Trigger the PF FW */ writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid); - mmiowb(); - /* Wait for PF to complete */ while ((tout >= 0) && (!*done)) { msleep(interval); @@ -957,7 +955,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) bnx2x_sample_bulletin(bp); if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { - BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); + BNX2X_ERR("Hypervisor will decline the request, avoiding\n"); rc = -EINVAL; goto out; } @@ -1179,7 +1177,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, /* ack the FW */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); - mmiowb(); /* copy the response header 
including status-done field, * must be last dmae, must be after FW is acked @@ -2174,7 +2171,6 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, */ storm_memset_vf_mbx_ack(bp, vf->abs_vfid); /* Firmware ack should be written before unlocking channel */ - mmiowb(); bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..2a4341708c0f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -556,8 +556,6 @@ normal_tx: tx_done: - mmiowb(); - if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { if (skb->xmit_more && !tx_buf->is_push) bnxt_db_write(bp, &txr->tx_db, prod); @@ -1133,6 +1131,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info = &rxr->rx_tpa[agg_id]; if (unlikely(cons != rxr->rx_next_cons)) { + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return; } @@ -1585,15 +1585,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } cons = rxcmp->rx_cmp_opaque; - rx_buf = &rxr->rx_buf_ring[cons]; - data = rx_buf->data; - data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return rc1; } + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + data_ptr = rx_buf->data_ptr; prefetch(data_ptr); misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); @@ -1610,12 +1612,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); + bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); rc = -EIO; - goto next_rx; + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); + bnxt_sched_reset(bp, rxr); + } + goto next_rx_no_len; } len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; @@ -1696,12 +1704,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = 1; next_rx: - rxr->rx_prod = NEXT_RX(prod); - rxr->rx_next_cons = NEXT_RX(cons); - cpr->rx_packets += 1; cpr->rx_bytes += len; +next_rx_no_len: + rxr->rx_prod = NEXT_RX(prod); + rxr->rx_next_cons = NEXT_RX(cons); + next_rx_no_prod_no_len: *raw_cons = tmp_raw_cons; @@ -2123,7 +2132,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget) &dim_sample); net_dim(&cpr->dim, dim_sample); } - mmiowb(); return work_done; } @@ -5125,10 +5133,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, close_path ? 
cmpl_ring_id : @@ -5141,10 +5149,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_RX, close_path ? cmpl_ring_id : @@ -5163,10 +5171,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; u32 grp_idx = rxr->bnapi->index; - u32 cmpl_ring_id; - cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, type, close_path ? cmpl_ring_id : INVALID_HW_RING_ID); @@ -5305,17 +5313,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_tx_rings = cpu_to_le16(tx_rings); if (BNXT_NEW_RM(bp)) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5) { enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; enables |= tx_rings + ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; } else { enables |= cp_rings ? - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= ring_grps ? FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -5355,14 +5362,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5) { enables |= tx_rings + ring_grps ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; } else { enables |= cp_rings ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; } @@ -6743,6 +6749,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + u32 tx_stat_size; int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) @@ -6752,13 +6759,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext)); + tx_stat_size = bp->hw_tx_port_stats_ext ? 
+ sizeof(*bp->hw_tx_port_stats_ext) : 0; + req.tx_stat_size = cpu_to_le16(tx_stat_size); req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; - bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8; + bp->fw_tx_stats_ext_size = tx_stat_size ? + le16_to_cpu(resp->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; @@ -8951,8 +8961,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) skip_uc: rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc && vnic->mc_list_count) { + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + } if (rc) - netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", rc); return rc; @@ -10675,6 +10692,7 @@ init_err_cleanup_tc: bnxt_clear_int_mode(bp); init_err_pci_clean: + bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..2aebd4bbb67d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1073,7 +1073,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi) struct tg3 *tp = tnapi->tp; tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); /* When doing tagged status, this work check is unnecessary. * The last_tag we write above tells the chip which piece of @@ -4283,7 +4282,7 @@ static void tg3_power_down(struct tg3 *tp) pci_set_power_state(tp->pdev, PCI_D3hot); } -static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) { switch (val & MII_TG3_AUX_STAT_SPDMASK) { case MII_TG3_AUX_STAT_10HALF: @@ -4787,7 +4786,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) bool current_link_up; u32 bmsr, val; u32 lcl_adv, rmt_adv; - u16 current_speed; + u32 current_speed; u8 current_duplex; int i, err; @@ -5719,7 +5718,7 @@ out: static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) { u32 orig_pause_cfg; - u16 orig_active_speed; + u32 orig_active_speed; u8 orig_active_duplex; u32 mac_status; bool current_link_up; @@ -5823,7 +5822,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) { int err = 0; u32 bmsr, bmcr; - u16 current_speed = SPEED_UNKNOWN; + u32 current_speed = SPEED_UNKNOWN; u8 current_duplex = DUPLEX_UNKNOWN; bool current_link_up = false; u32 local_adv, remote_adv, sgsr; @@ -6999,7 +6998,6 @@ next_pkt_nopost: tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); } - mmiowb(); } else if (work_mask) { /* rx_std_buffers[] and rx_jmb_buffers[] entries must be * updated before the producer indices can be updated. 
@@ -7210,8 +7208,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, dpr->rx_jmb_prod_idx); - mmiowb(); - if (err) tw32_f(HOSTCC_MODE, tp->coal_now); } @@ -7278,7 +7274,6 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) HOSTCC_MODE_ENABLE | tnapi->coal_now); } - mmiowb(); break; } } @@ -8159,7 +8154,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!skb->xmit_more || netif_xmit_stopped(txq)) { /* Packets are ready, update Tx producer idx on card. */ tw32_tx_mbox(tnapi->prodmbox, entry); - mmiowb(); } return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b685c..6953d0546acb 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info { struct tg3_link_config { /* Describes what we're trying to get. */ u32 advertising; - u16 speed; + u32 speed; u8 duplex; u8 autoneg; u8 flowctrl; @@ -2882,7 +2882,7 @@ struct tg3_link_config { u8 active_flowctrl; u8 active_duplex; - u16 active_speed; + u32 active_speed; u32 rmt_adv; }; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1522aee81884..3da2795e2486 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) /* First, update TX stats if needed */ if (skb) { - if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + gem_ptp_do_txstamp(queue, skb, desc) == 0) { /* skb now belongs to timestamp buffer * and will be removed later */ diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index 2df7440f58df..39643be8c30a 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -38,9 +38,6 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct) lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); - /* make sure that the reset is written before starting timer */ - mmiowb(); - /* Wait for 10ms as Octeon resets. */ mdelay(100); @@ -487,9 +484,6 @@ void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, /* Disable Interrupts */ writeq(0, cn6xxx->intr_enb_reg64); - - /* make sure interrupts are really disabled */ - mmiowb(); } static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) @@ -555,10 +549,6 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) value &= ~(1 << oq_no); octeon_write_csr(oct, reg, value); - /* Ensure that the enable register is written. 
- */ - mmiowb(); - spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg); } } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index ce8c3f818666..934115d18488 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1449,7 +1449,6 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) iq->pkt_in_done -= iq->pkts_processed; iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ - mmiowb(); spin_unlock_bh(&iq->lock); oct = iq->oct_dev; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index a0c099f71524..017169023cca 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -513,8 +513,6 @@ int octeon_retry_droq_refill(struct octeon_droq *droq) */ wmb(); writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) reschedule = 0; @@ -712,8 +710,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, */ wmb(); writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); } } } /* for (each packet)... */ diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index c6f4cbda040f..fcf20a8f92d9 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -278,7 +278,6 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) if (atomic_read(&oct->status) == OCT_DEV_RUNNING) { writel(iq->fill_cnt, iq->doorbell_reg); /* make sure doorbell write goes through */ - mmiowb(); iq->fill_cnt = 0; iq->last_db_time = jiffies; return; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 28eac9056211..c032bef1b776 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -32,6 +32,13 @@ #define DRV_NAME "nicvf" #define DRV_VERSION "1.0" +/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs + * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed + * this value, keeping headroom for the 14 byte Ethernet header and two + * VLAN tags (for QinQ) + */ +#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) + /* Supported devices */ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, @@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) struct nicvf *nic = netdev_priv(netdev); int orig_mtu = netdev->mtu; + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. + */ + if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } + netdev->mtu = new_mtu; if (!netif_running(netdev)) @@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) bool bpf_attached = false; int ret = 0; - /* For now just support only the usual MTU sized frames */ - if (prog && (dev->mtu > 1500)) { + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. 
+ */ + if (prog && dev->mtu > MAX_XDP_MTU) { netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", dev->mtu); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 697c2427f2b7..a96ad20ee484 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) int ret; if (enable) { - ret = clk_prepare_enable(fep->clk_ahb); - if (ret) - return ret; - ret = clk_prepare_enable(fep->clk_enet_out); if (ret) - goto failed_clk_enet_out; + return ret; if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) phy_reset_after_clk_enable(ndev->phydev); } else { - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1885,8 +1880,6 @@ failed_clk_ref: failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); -failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ahb); return ret; } @@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) ret = clk_prepare_enable(fep->clk_ipg); if (ret) goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -3563,6 +3559,9 @@ failed_reset: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + clk_disable_unprepare(fep->clk_ipg); failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: @@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); return 0; @@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; - return clk_prepare_enable(fep->clk_ipg); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + return 0; + +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + return ret; } static const struct dev_pm_ops fec_pm_ops = { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 51cfe95f3e24..3dfb2d131eb7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3762,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; + netdev_features_t old_hw_features = 0; union ibmvnic_crq crq; int i; @@ -3837,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) adapter->ip_offload_ctrl.large_rx_ipv4 = 0; adapter->ip_offload_ctrl.large_rx_ipv6 = 0; - adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; + if (adapter->state != VNIC_PROBING) { + old_hw_features = adapter->netdev->hw_features; + adapter->netdev->hw_features = 0; + } + + adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) - 
adapter->netdev->features |= NETIF_F_IP_CSUM; + adapter->netdev->hw_features |= NETIF_F_IP_CSUM; if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) - adapter->netdev->features |= NETIF_F_IPV6_CSUM; + adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; if ((adapter->netdev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) - adapter->netdev->features |= NETIF_F_RXCSUM; + adapter->netdev->hw_features |= NETIF_F_RXCSUM; if (buf->large_tx_ipv4) - adapter->netdev->features |= NETIF_F_TSO; + adapter->netdev->hw_features |= NETIF_F_TSO; if (buf->large_tx_ipv6) - adapter->netdev->features |= NETIF_F_TSO6; + adapter->netdev->hw_features |= NETIF_F_TSO6; - adapter->netdev->hw_features |= adapter->netdev->features; + if (adapter->state == VNIC_PROBING) { + adapter->netdev->features |= adapter->netdev->hw_features; + } else if (old_hw_features != adapter->netdev->hw_features) { + netdev_features_t tmp = 0; + + /* disable features no longer supported */ + adapter->netdev->features &= adapter->netdev->hw_features; + /* turn on features now supported if previously enabled */ + tmp = (old_hw_features ^ adapter->netdev->hw_features) & + adapter->netdev->hw_features; + adapter->netdev->features |= + tmp & adapter->netdev->wanted_features; + } memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8fe9af0e2ab7..466bf1ea186d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3270,11 +3270,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (!skb->xmit_more || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); - /* we need this if more than one processor can write to - * our tail at a time, it synchronizes IO on IA64/Altix - * systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7acc61e4f645..022c3ac0e40f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3816,7 +3816,6 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter) if (tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; ew32(TDT(0), tx_ring->next_to_use); - mmiowb(); usleep_range(200, 250); } @@ -5904,12 +5903,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_ring->next_to_use); else writel(tx_ring->next_to_use, tx_ring->tail); - - /* we need this if more than one processor can write - * to our tail at a time, it synchronizes IO on - *IA64/Altix systems - */ - mmiowb(); } } else { dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5d4f1761dc0c..8de77155f2e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -321,8 +321,6 @@ static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask); err_mask |= PCI_ERR_UNC_COMP_ABORT; pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask); - - mmiowb(); } int fm10k_iov_resume(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index ecef949f3baa..cbf76a96e94e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1039,11 +1039,6 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6c97667d20ef..ffb611bbedfa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3471,11 +3471,6 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 9b4d7cec2e18..6bfef82e7607 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2360,11 +2360,6 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c289d97f477d..1af21bbe180e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1356,11 +1356,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, /* notify HW of packet */ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3269d8e94744..1d71ec360b1c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6028,11 +6028,6 @@ static int igb_tx_map(struct igb_ring *tx_ring, if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 4eab83faec62..34cd30d7162f 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2279,10 +2279,6 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 
87a11879bf2d..f8d692f6aa4f 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -892,11 +892,6 @@ static int igc_tx_map(struct igc_ring *tx_ring, if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e100054a3765..99e23cf6a73a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8299,11 +8299,6 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); - - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); } return 0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 8b3495ee2b6e..49486c10ef81 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1139,9 +1139,6 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) /* Make sure write' to descriptors are complete before we tell hardware */ wmb(); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); - - /* Synchronize I/O on since next processor may write to tail */ - mmiowb(); } @@ -1354,7 +1351,6 @@ stopped: /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); - mmiowb(); } /* Clean out receive buffer area, assumes receiver hardware stopped */ diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index c81d15bf259c..87e90b5d4d7d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -129,10 +129,6 @@ static int mlx4_reset_slave(struct mlx4_dev *dev) comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET; __raw_writel((__force u32)cpu_to_be32(comm_flags), (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS); - /* Make sure that our comm channel write doesn't - * get mixed in with writes from another CPU. - */ - mmiowb(); end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies; while (time_before(jiffies, end)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a5d5d6fc1da0..c678344d22a2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -281,7 +281,6 @@ static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); __raw_writel((__force u32) cpu_to_be32(val), &priv->mfunc.comm->slave_write); - mmiowb(); mutex_unlock(&dev->persist->device_state_mutex); return 0; } @@ -496,12 +495,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, (op_modifier << HCR_OPMOD_SHIFT) | op), hcr + 6); - /* - * Make sure that our HCR writes don't get mixed in with - * writes from another CPU starting a FW command. 
- */ - mmiowb(); - cmd->toggle = cmd->toggle ^ 1; ret = 0; @@ -2206,7 +2199,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, } __raw_writel((__force u32) cpu_to_be32(reply), &priv->mfunc.comm[slave].slave_read); - mmiowb(); return; @@ -2410,7 +2402,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) &priv->mfunc.comm[i].slave_write); __raw_writel((__force u32) 0, &priv->mfunc.comm[i].slave_read); - mmiowb(); for (port = 1; port <= MLX4_MAX_PORTS; port++) { struct mlx4_vport_state *admin_vport; struct mlx4_vport_state *oper_vport; @@ -2576,10 +2567,6 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR; __raw_writel((__force u32)cpu_to_be32(slave_read), &priv->mfunc.comm[slave].slave_read); - /* Make sure that our comm channel write doesn't - * get mixed in with writes from another CPU. - */ - mmiowb(); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index be48c6440251..c087d1014b09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -917,7 +917,6 @@ static void cmd_work_handler(struct work_struct *work) mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); wmb(); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); - mmiowb(); /* if not in polling don't use ent after this point */ if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { poll_timeout(ent); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 71c65cc17904..d3eaf2ceaa39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); * switching channels */ typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, mlx5e_fp_hw_modify hw_modify); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d38e62cdf24..476dd97f7f2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) { - int err; + int err = 0; rtnl_lock(); mutex_lock(&priv->state_lock); - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + + err = mlx5e_safe_reopen_channels(priv); + +out: mutex_unlock(&priv->state_lock); rtnl_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index fa2a3c444cdc..eec07b34b4ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + if (!(mlx5e_eswitch_rep(*out_dev) && + mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) + return -EOPNOTSUPP; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 03b2a9f9c589..cad34d6f5f45 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -33,6 +33,26 @@ #include <linux/bpf_trace.h> #include "en/xdp.h" +int mlx5e_xdp_max_mtu(struct mlx5e_params *params) +{ + int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + + /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). + * The condition checked in mlx5e_rx_is_linear_skb is: + * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) + * (Note that hw_mtu == sw_mtu + hard_mtu.) + * What is returned from this function is: + * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) + * After assigning sw_mtu := max_mtu, the left side of (1) turns to + * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, + * because both PAGE_SIZE and S are already aligned. Any number greater + * than max_mtu would make the left side of (1) greater than PAGE_SIZE, + * so max_mtu is the maximum MTU allowed. + */ + + return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); +} + static inline bool mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, struct xdp_buff *xdp) @@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) mlx5e_xdpi_fifo_pop(xdpi_fifo); if (is_redirect) { - xdp_return_frame(xdpi.xdpf); dma_unmap_single(sq->pdev, xdpi.dma_addr, xdpi.xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi.xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi.di, true); @@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) mlx5e_xdpi_fifo_pop(xdpi_fifo); if (is_redirect) { - xdp_return_frame(xdpi.xdpf); dma_unmap_single(sq->pdev, xdpi.dma_addr, xdpi.xdpf->len, DMA_TO_DEVICE); + xdp_return_frame(xdpi.xdpf); } else { /* Recycle RX page */ mlx5e_page_release(rq, &xdpi.di, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index ee27a7c8cd87..553956cadc8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -34,13 +34,12 @@ #include "en.h" -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) #define MLX5E_XDP_TX_EMPTY_DS_COUNT \ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) +int mlx5e_xdp_max_mtu(struct mlx5e_params *params); bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len); bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 5efce4a3ff79..78dc8fe2a83c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1586,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, break; case MLX5_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; break; default: netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", @@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) struct mlx5e_channel *c; int i; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || + priv->channels.params.xdp_prog) return 0; for (i = 0; i < channels->num; 
i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5fdbd3190d9..46157e2a1e5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) + /* We disable csum_complete when XDP is enabled since + * XDP programs might manipulate packets which will render + * skb->checksum incorrect. + */ + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); return 0; @@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, return 0; } +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channels new_channels = {}; + + new_channels.params = priv->channels.params; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { priv->tstamp.tx_type = HWTSTAMP_TX_OFF; @@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, if (params->xdp_prog && !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", - new_mtu, MLX5E_XDP_MAX_MTU); + new_mtu, mlx5e_xdp_max_mtu(params)); err = -EINVAL; goto out; } @@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!report_failed) goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + err = mlx5e_safe_reopen_channels(priv); if (err) netdev_err(priv->netdev, - "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", err); unlock: @@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", - new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); + new_channels.params.sw_mtu, + mlx5e_xdp_max_mtu(&new_channels.params)); return -EINVAL; } @@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, { enum mlx5e_traffic_types tt; - rss_params->hfunc = ETH_RSS_HASH_XOR; + rss_params->hfunc = ETH_RSS_HASH_TOP; netdev_rss_key_fill(rss_params->toeplitz_hash_key, sizeof(rss_params->toeplitz_hash_key)); mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3dde5c7e0739..c3b3002ff62f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, { *proto = ((struct ethhdr *)skb->data)->h_proto; *proto = __vlan_get_protocol(skb, *proto, network_depth); - return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); + + if (*proto == htons(ETH_P_IP)) + return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); + + if (*proto == htons(ETH_P_IPV6)) + return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); + + return false; } static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) @@ -712,17 +719,6 @@ static inline void 
mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) rq->stats->ecn_mark += !!rc; } -static u32 mlx5e_get_fcs(const struct sk_buff *skb) -{ - const void *fcs_bytes; - u32 _fcs_bytes; - - fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, - ETH_FCS_LEN, &_fcs_bytes); - - return __get_unaligned_cpu32(fcs_bytes); -} - static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) { void *ip_p = skb->data + network_depth; @@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) +#define MAX_PADDING 8 + +static void +tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, + struct mlx5e_rq_stats *stats) +{ + stats->csum_complete_tail_slow++; + skb->csum = csum_block_add(skb->csum, + skb_checksum(skb, offset, len, 0), + offset); +} + +static void +tail_padding_csum(struct sk_buff *skb, int offset, + struct mlx5e_rq_stats *stats) +{ + u8 tail_padding[MAX_PADDING]; + int len = skb->len - offset; + void *tail; + + if (unlikely(len > MAX_PADDING)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + tail = skb_header_pointer(skb, offset, len, tail_padding); + if (unlikely(!tail)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + stats->csum_complete_tail++; + skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); +} + +static void +mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) +{ + struct ipv6hdr *ip6; + struct iphdr *ip4; + int pkt_len; + + switch (proto) { + case htons(ETH_P_IP): + ip4 = (struct iphdr *)(skb->data + network_depth); + pkt_len = network_depth + ntohs(ip4->tot_len); + break; + case htons(ETH_P_IPV6): + ip6 = (struct ipv6hdr *)(skb->data + network_depth); + pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); + break; + default: + return; + } + + if (likely(pkt_len >= skb->len)) + return; + + tail_padding_csum(skb, pkt_len, stats); +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) + /* True when explicitly set via priv flag, or XDP prog is loaded */ + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet @@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); - if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_block_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb), - skb->len - ETH_FCS_LEN); + + mlx5e_skb_padding_csum(skb, network_depth, proto, stats); stats->csum_complete++; return; } csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && - ((cqe->hds_ip_ext & CQE_L4_OK) || - (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { + (cqe->hds_ip_ext & CQE_L4_OK))) { skb->ip_summed = CHECKSUM_UNNECESSARY; if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1a78e05cbba8..b75aa8b8bf04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -59,6 +59,8 @@ static const struct 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..b75aa8b8bf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..16c3b785f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
+	u64 rx_csum_complete_tail;
+	u64 rx_csum_complete_tail_slow;
 	u64 rx_csum_unnecessary_inner;
 	u64 rx_xdp_drop;
 	u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_complete_tail;
+	u64 csum_complete_tail_slow;
 	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 8de64e88c670..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
 	return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-				       spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+					spinlock_t *idr_spinlock, u32 swid)
 {
 	unsigned long flags;
+	void *ptr;
 
 	spin_lock_irqsave(idr_spinlock, flags);
-	idr_remove(idr, swid);
+	ptr = idr_remove(idr, swid);
 	spin_unlock_irqrestore(idr_spinlock, flags);
+	return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@
 	kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-	struct mlx5_fpga_tls_command_context cmd;
-	u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 				  struct mlx5_fpga_device *fdev,
 				  struct mlx5_fpga_tls_command_context *cmd,
 				  struct mlx5_fpga_dma_buf *resp)
 {
-	struct mlx5_teardown_stream_context *ctx =
-		container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
 	if (resp) {
 		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 			mlx5_fpga_err(fdev,
 				      "Teardown stream failed with syndrome = %d",
 				      syndrome);
-		else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-						   &fdev->tls->tx_idr_spinlock,
-						   ctx->swid);
-		else
-			mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-						   &fdev->tls->rx_idr_spinlock,
-						   ctx->swid);
 	}
 	mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	void *cmd;
 	int ret;
 
-	rcu_read_lock();
-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-	rcu_read_unlock();
-
-	if (!flow) {
-		WARN_ONCE(1, "Received NULL pointer for handle\n");
-		return -EINVAL;
-	}
-
 	buf = kzalloc(size, GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;
 
 	cmd = (buf + 1);
 
+	rcu_read_lock();
+	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+	if (unlikely(!flow)) {
+		rcu_read_unlock();
+		WARN_ONCE(1, "Received NULL pointer for handle\n");
+		kfree(buf);
+		return -EINVAL;
+	}
 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+	rcu_read_unlock();
 
 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
 	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 					    void *flow, u32 swid, gfp_t flags)
 {
-	struct mlx5_teardown_stream_context *ctx;
+	struct mlx5_fpga_tls_command_context *ctx;
 	struct mlx5_fpga_dma_buf *buf;
 	void *cmd;
 
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	if (!ctx)
 		return;
 
-	buf = &ctx->cmd.buf;
+	buf = &ctx->buf;
 	cmd = (ctx + 1);
 	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
 	MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	buf->sg[0].data = cmd;
 	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-	ctx->swid = swid;
-	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
 			       mlx5_fpga_tls_teardown_completion);
 }
 
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
 	void *flow;
 
-	rcu_read_lock();
 	if (direction_sx)
-		flow = idr_find(&tls->tx_idr, swid);
+		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+						  &tls->tx_idr_spinlock,
+						  swid);
 	else
-		flow = idr_find(&tls->rx_idr, swid);
-
-	rcu_read_unlock();
+		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+						  &tls->rx_idr_spinlock,
+						  swid);
 
 	if (!flow) {
 		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 		return;
 	}
 
+	synchronize_rcu(); /* before kfree(flow) */
 	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
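The mlx5_fpga_tls_del_flow() rework above folds lookup and removal into one step: mlx5_fpga_tls_release_swid() now returns idr_remove()'s result, so whichever caller gets a non-NULL pointer owns the flow, and synchronize_rcu() then lets concurrent idr_find() readers drain before the flow is freed. A userspace toy of the claim-and-remove part, with a single mutex-protected slot standing in for the IDR (all names here are illustrative, not from the driver):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;

/* Look up and unlink in one critical section, returning the old value,
 * the shape mlx5_fpga_tls_release_swid() now has. */
static void *release_slot(void)
{
	void *ptr;

	pthread_mutex_lock(&slot_lock);
	ptr = slot;
	slot = NULL;
	pthread_mutex_unlock(&slot_lock);
	return ptr;
}

static void *deleter(void *arg)
{
	void *flow = release_slot();

	if (flow) {
		printf("thread %s frees the flow\n", (char *)arg);
		free(flow);
	} else {
		printf("thread %s lost the race, does nothing\n", (char *)arg);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	slot = malloc(16);
	pthread_create(&a, NULL, deleter, (void *)"A");
	pthread_create(&b, NULL, deleter, (void *)"B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with -lpthread; exactly one thread reports freeing the flow regardless of scheduling, which is what removes the double-free window the old find-now-free-later split allowed.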
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 21b7f05b16a5..361468e0435d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
 	i2c_addr = MLX5_I2C_ADDR_LOW;
-	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-		i2c_addr = MLX5_I2C_ADDR_HIGH;
-		offset -= MLX5_EEPROM_PAGE_LENGTH;
-	}
 
 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..f26a4ca29363 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
 		return 0;
 
-	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
 	if (!emad_wq)
 		return -ENOMEM;
 	mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
 {
 	int err;
 
-	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
 	if (!mlxsw_wq)
 		return -ENOMEM;
-	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
 					    mlxsw_core_driver_name);
 	if (!mlxsw_owq) {
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index ffee38e36ce8..8648ca171254 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9eb63300c1d3..6b8aa3761899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	if (err)
 		return err;
 
+	mlxsw_sp_port->link.autoneg = autoneg;
+
 	if (!netif_running(dev))
 		return 0;
 
-	mlxsw_sp_port->link.autoneg = autoneg;
-
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
@@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
 					    MLXSW_REG_QEEC_HIERARCY_TC,
 					    i + 8, i,
-					    false, 0);
+					    true, 100);
 		if (err)
 			return err;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..d633bef5f105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
 	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
 	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
 	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };
 
 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
 	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };
 
 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
 	MLXSW_SP_SB_CM(0, 7, 4),
 	MLXSW_SP_SB_CM(0, 7, 4),
 	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
-	MLXSW_SP_SB_CM(0, 7, 4),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+	MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
 	MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
 	MLXSW_SP_SB_PM(0, 0),
 	MLXSW_SP_SB_PM(0, 0),
 	MLXSW_SP_SB_PM(0, 0),
+	MLXSW_SP_SB_PM(10000, 90000),
 };
 
 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..902e766a8ed3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
 	 * populate the FDB
 	 */
-	if (netif_is_macvlan(dev))
+	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
 		return 0;
 
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 fid_index;
 	int err = 0;
 
-	if (switchdev_trans_ph_prepare(trans))
+	if (switchdev_trans_ph_commit(trans))
 		return 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
 			      struct netdev_hw_addr *hw_addr)
 {
 	struct ocelot *ocelot = port->ocelot;
-	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+	struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
 	if (!ha)
 		return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 		       ETH_GSTRING_LEN);
 }
 
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-	struct delayed_work *del_work = to_delayed_work(work);
-	struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
 	int i, j;
 
 	mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
 		}
 	}
 
-	cancel_delayed_work(&ocelot->stats_work);
+	mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct ocelot *ocelot = container_of(del_work, struct ocelot,
+					     stats_work);
+
+	ocelot_update_stats(ocelot);
+
 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
 			   OCELOT_STATS_CHECK_DELAY);
-
-	mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
 	int i;
 
 	/* check and update now */
-	ocelot_check_stats(&ocelot->stats_work.work);
+	ocelot_update_stats(ocelot);
 
 	/* Copy all counters */
 	for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
 			 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
 			 ANA_CPUQ_8021_CFG, i);
 
-	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
 			   OCELOT_STATS_CHECK_DELAY);
 	return 0;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index e0340f778d8f..d8b7fba96d58 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1439,7 +1439,6 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 		tx->queue_active = 0;
 		put_be32(htonl(1), tx->send_stop);
 		mb();
-		mmiowb();
 	}
 	__netif_tx_unlock(dev_queue);
 }
@@ -2861,7 +2860,6 @@ again:
 		tx->queue_active = 1;
 		put_be32(htonl(1), tx->send_go);
 		mb();
-		mmiowb();
 	}
 	tx->pkt_start++;
 	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index feda9644289d..3b2ae1a21678 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4153,8 +4153,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	writeq(val64, &tx_fifo->List_Control);
 
-	mmiowb();
-
 	put_off++;
 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
 		put_off = 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
 				dma_object->addr))) {
 			vxge_os_dma_free(devh->pdev, memblock,
 					 &dma_object->acc_handle);
+			memblock = NULL;
 			goto exit;
 		}
 
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b877acec5cde..1d334f2e0a56 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1826,7 +1826,6 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
 		vxge_hw_channel_msix_unmask(
 			(struct __vxge_hw_channel *)ring->handle,
 			ring->rx_vector_no);
-		mmiowb();
 	}
 
 	/* We are copying and returning the local variable, in case if after
@@ -2234,8 +2233,6 @@ static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
 				    fifo->tx_vector_no);
 
-	mmiowb();
-
 	return IRQ_HANDLED;
 }
 
@@ -2272,14 +2269,12 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 		 */
 		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
 		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-		mmiowb();
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 						     vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
 						  msix_id);
-			mmiowb();
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 59e77e3086bb..709d20d9938f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1399,11 +1399,7 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
 		&fifo->nofl_db->control_0);
 
-	mmiowb();
-
 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-
-	mmiowb();
 }
 
 /**
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
index 9852080cf454..ff3913085665 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/cls.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
 	}
 	if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
 	    knode->sel->offoff || knode->fshift) {
-		NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+		NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
 		return false;
 	}
 	if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
 
 	k = &knode->sel->keys[0];
 	if (k->offmask) {
-		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
 		return false;
 	}
 	if (k->off) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..127c89b22ef0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info {
 	u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT	1
+
 struct qed_db_recovery_info {
 	struct list_head list;
 
 	/* Lock to protect the doorbell recovery mechanism list */
 	spinlock_t lock;
+	bool dorq_attn;
 	u32 db_recovery_counter;
+	unsigned long overflow;
 };
 
 struct storm_stats {
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
 
 /* doorbell recovery mechanism */
 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-			     enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
 
 /* Other Linux specific common definitions */
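QED_OVERFLOW_BIT and the overflow word added to qed_db_recovery_info above form a sticky latch: the attention callback records the overflow with set_bit() and defers the heavy recovery work to the periodic handler, which consumes the latch with test_and_clear_bit() (see the qed_int.c changes further down). A compact userspace analog of that latch using C11 atomics, with names invented purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

#define OVERFLOW_BIT 1u

static atomic_uint overflow_flags;

/* Attention path: latch the condition and return quickly. */
static void attn_handler(void)
{
	atomic_fetch_or(&overflow_flags, OVERFLOW_BIT);
}

/* Periodic path: consume the latch exactly once, like
 * test_and_clear_bit(QED_OVERFLOW_BIT, ...) does in the driver. */
static int periodic_handler(void)
{
	unsigned int old = atomic_fetch_and(&overflow_flags, ~OVERFLOW_BIT);

	if (!(old & OVERFLOW_BIT))
		return 0;	/* nothing latched since the last run */
	printf("running doorbell recovery\n");
	return 1;
}

int main(void)
{
	attn_handler();
	attn_handler();		/* a second attention does not double the work */
	printf("first run:  %d\n", periodic_handler());	/* prints 1 */
	printf("second run: %d\n", periodic_handler());	/* prints 0 */
	return 0;
}

The point of the read-modify-write primitives is that latching and consuming stay race-free even when the attention and periodic paths run concurrently.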
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..866cdc86a3f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
 
 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-			      void __iomem *db_addr, void *db_data)
+			      void __iomem *db_addr,
+			      enum qed_db_rec_width db_width,
+			      void *db_data)
 {
+	u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
 	/* Make sure doorbell address is within the doorbell bar */
 	if (db_addr < cdev->doorbells ||
-	    (u8 __iomem *)db_addr >
+	    (u8 __iomem *)db_addr + width >
 	    (u8 __iomem *)cdev->doorbells + cdev->db_size) {
 		WARN(true,
 		     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
 	}
 
 	/* Sanitize doorbell address */
-	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+	if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
 		return -EINVAL;
 
 	/* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
 		return 0;
 	}
 
-	/* Sanitize doorbell address */
-	if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-		return -EINVAL;
-
 	/* Obtain hwfn from doorbell address */
 	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
 
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
 
 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-				 struct qed_db_recovery_entry *db_entry,
-				 enum qed_db_rec_exec db_exec)
-{
-	if (db_exec != DB_REC_ONCE) {
-		/* Print according to width */
-		if (db_entry->db_width == DB_REC_WIDTH_32B) {
-			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-				   "%s doorbell address %p data %x\n",
-				   db_exec == DB_REC_DRY_RUN ?
-				   "would have rung" : "ringing",
-				   db_entry->db_addr,
-				   *(u32 *)db_entry->db_data);
-		} else {
-			DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-				   "%s doorbell address %p data %llx\n",
-				   db_exec == DB_REC_DRY_RUN ?
-				   "would have rung" : "ringing",
-				   db_entry->db_addr,
-				   *(u64 *)(db_entry->db_data));
-		}
+				 struct qed_db_recovery_entry *db_entry)
+{
+	/* Print according to width */
+	if (db_entry->db_width == DB_REC_WIDTH_32B) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "ringing doorbell address %p data %x\n",
+			   db_entry->db_addr,
+			   *(u32 *)db_entry->db_data);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "ringing doorbell address %p data %llx\n",
+			   db_entry->db_addr,
+			   *(u64 *)(db_entry->db_data));
 	}
 
 	/* Sanity */
 	if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-			       db_entry->db_data))
+			       db_entry->db_width, db_entry->db_data))
 		return;
 
 	/* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 	wmb();
 
 	/* Ring the doorbell */
-	if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-		if (db_entry->db_width == DB_REC_WIDTH_32B)
-			DIRECT_REG_WR(db_entry->db_addr,
-				      *(u32 *)(db_entry->db_data));
-		else
-			DIRECT_REG_WR64(db_entry->db_addr,
-					*(u64 *)(db_entry->db_data));
-	}
+	if (db_entry->db_width == DB_REC_WIDTH_32B)
+		DIRECT_REG_WR(db_entry->db_addr,
+			      *(u32 *)(db_entry->db_data));
+	else
+		DIRECT_REG_WR64(db_entry->db_addr,
+				*(u64 *)(db_entry->db_data));
 
 	/* Flush the write combined buffer. Next doorbell may come from a
 	 * different entity to the same address...
@@ -350,29 +341,21 @@
 }
 
 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-			     enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
 	struct qed_db_recovery_entry *db_entry = NULL;
 
-	if (db_exec != DB_REC_ONCE) {
-		DP_NOTICE(p_hwfn,
-			  "Executing doorbell recovery. Counter was %d\n",
-			  p_hwfn->db_recovery_info.db_recovery_counter);
+	DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+		  p_hwfn->db_recovery_info.db_recovery_counter);
 
-		/* Track amount of times recovery was executed */
-		p_hwfn->db_recovery_info.db_recovery_counter++;
-	}
+	/* Track amount of times recovery was executed */
+	p_hwfn->db_recovery_info.db_recovery_counter++;
 
 	/* Protect the list */
 	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
 	list_for_each_entry(db_entry,
-			    &p_hwfn->db_recovery_info.list, list_entry) {
-		qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-		if (db_exec == DB_REC_ONCE)
-			break;
-	}
-
+			    &p_hwfn->db_recovery_info.list, list_entry)
+		qed_db_recovery_ring(p_hwfn, db_entry);
 	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }
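Note what the qed_db_rec_sanity() change above fixes: the old check validated only the doorbell's start address, so a 64-bit doorbell beginning in the last few bytes of the BAR passed the check yet overran it. Including the access width rejects that case. A standalone illustration with made-up BAR bounds (nothing here is driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define BAR_BASE 0x1000u	/* hypothetical doorbell BAR start */
#define BAR_SIZE 0x100u		/* hypothetical doorbell BAR size */

/* Old-style check: only the start address is validated. */
static bool sane_old(uint32_t addr)
{
	return addr >= BAR_BASE && addr <= BAR_BASE + BAR_SIZE;
}

/* New-style check: the whole access, address plus width, must fit. */
static bool sane_new(uint32_t addr, uint32_t width_bits)
{
	uint32_t width = width_bits / 8;

	return addr >= BAR_BASE && addr + width <= BAR_BASE + BAR_SIZE;
}

int main(void)
{
	uint32_t last = BAR_BASE + BAR_SIZE - 4;	/* room for 32 bits only */

	printf("64-bit doorbell at %#x: old=%d new=%d\n",
	       (unsigned)last, sane_old(last), sane_new(last, 64));
	printf("32-bit doorbell at %#x: old=%d new=%d\n",
	       (unsigned)last, sane_old(last), sane_new(last, 32));
	return 0;
}

The 64-bit case prints old=1 new=0: exactly the overrun the width-aware check now catches.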
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index e23980e301b6..fdfedbc8e431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 	u32 count = QED_DB_REC_COUNT;
 	u32 usage = 1;
 
+	/* Flush any pending (e)dpms as they may never arrive */
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
 	/* wait for usage to zero or count to run out. This is necessary since
 	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
 	 * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 overflow;
+	u32 attn_ovfl, cur_ovfl;
 	int rc;
 
-	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-	DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-	if (!overflow) {
-		qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+				       &p_hwfn->db_recovery_info.overflow);
+	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!cur_ovfl && !attn_ovfl)
 		return 0;
-	}
 
-	if (qed_edpm_enabled(p_hwfn)) {
+	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+		  attn_ovfl, cur_ovfl);
+
+	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
 		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
 		if (rc)
 			return rc;
 	}
 
-	/* Flush any pending (e)dpm as they may never arrive */
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
 	/* Release overflow sticky indication (stop silently dropping everything) */
 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
 	/* Repeat all last doorbells (doorbell drop recovery) */
-	qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+	qed_db_recovery_execute(p_hwfn);
 
 	return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
 	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+	u32 overflow;
 	int rc;
 
-	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+	if (!overflow)
+		goto out;
+
+	/* Run PF doorbell recovery in next periodic handler */
+	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+	if (!p_hwfn->db_bar_no_edpm) {
+		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+		if (rc)
+			goto out;
+	}
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+	/* Schedule the handler even if overflow was not detected */
+	qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
 	/* int_sts may be zero since all PFs were interrupted for doorbell
 	 * overflow but another one already handled it. Can abort here. If
 	 * This PF also requires overflow recovery we will be interrupted again.
 	 * The masked almost full indication may also be set. Ignoring.
 	 */
+	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
 	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
 		return 0;
 
+	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
 	/* check if db_drop or overflow happened */
 	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
 		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@
 			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
 			  first_drop_reason, all_drops_reason);
 
-		rc = qed_db_rec_handler(p_hwfn, p_ptt);
-		qed_periodic_db_rec_start(p_hwfn);
-		if (rc)
-			return rc;
-
 		/* Clear the doorbell drop details and prepare for next drop */
 		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -507,6 +528,25 @@
 	return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+	p_hwfn->db_recovery_info.dorq_attn = true;
+	qed_dorq_attn_overflow(p_hwfn);
+
+	return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+	if (p_hwfn->db_recovery_info.dorq_attn)
+		goto out;
+
+	/* Call DORQ callback if the attention was missed */
+	qed_dorq_attn_cb(p_hwfn);
+out:
+	p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have a some 'special'
  * identifiers for sources that changed meaning between adapters.
 */
@@ -774,18 +814,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 {
 	u16 rc = 0, index;
 
-	/* Make certain HW write took affect */
-	mmiowb();
-
 	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
 	if (p_sb_desc->index != index) {
 		p_sb_desc->index = index;
 		rc = QED_SB_ATT_IDX;
 	}
 
-	/* Make certain we got a consistent view with HW */
-	mmiowb();
-
 	return rc;
 }
 
@@ -1080,6 +1114,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
 		}
 	}
 
+	/* Handle missed DORQ attention */
+	qed_dorq_attn_handler(p_hwfn);
+
 	/* Clear IGU indication for the deasserted bits */
 	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
 				    GTT_BAR0_MAP_REG_IGU_CMD +
@@ -1170,7 +1207,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
 	/* Both segments (interrupts & acks) are written to same place address;
 	 * Need to guarantee all commands will be received (in-order) by HW.
 	 */
-	mmiowb();
 	barrier();
 }
 
@@ -1805,9 +1841,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
 
-	/* Flush the writes to IGU */
-	mmiowb();
-
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 }
@@ -1871,9 +1904,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
 
 	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
 
-	/* Flush the write to IGU */
-	mmiowb();
-
 	/* calculate where to read the status bit from */
 	sb_bit = 1 << (igu_sb_id % 32);
 	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 1f356ed4f761..d473b522afc5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
 *
 * @param p_hwfn
 * @param p_ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f164d4acebcb..6de23b56b294 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 	}
 }
 
-#define QED_PERIODIC_DB_REC_COUNT		100
+#define QED_PERIODIC_DB_REC_COUNT		10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS		100
 #define QED_PERIODIC_DB_REC_INTERVAL \
 	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 79b311b86f66..f5f3c03b9dd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
 
 	REG_WR16(p_hwfn, addr, prod);
-
-	/* keep prod updates ordered */
-	mmiowb();
 }
 
 int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9faaa6df78ed..2f318aaf2b05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 		p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
 	} else {
 		DP_INFO(p_hwfn,
-			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+			"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
 			vf->abs_vf_id,
 			req->vfdev_info.eth_fp_hsi_major,
 			req->vfdev_info.eth_fp_hsi_minor,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b4c8949933f1..4555c0b161ef 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1526,14 +1526,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	barrier();
 	writel(txq->tx_db.raw, txq->doorbell_addr);
 
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
-
 	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
 		if (qede_txq_has_work(txq))
 			break;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 31b046e24565..6f7e3622c6b4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
 			(u32 *)&rx_prods);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the napi lock is released and another qede_poll is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
 }
 
 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 5f3f42a25361..bddb2b5982dc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
 	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
 	if (IS_ERR(ptp->clock)) {
-		rc = -EINVAL;
 		DP_ERR(edev, "PTP clock registration failed\n");
+		qede_ptp_disable(edev);
+		rc = -EINVAL;
 		goto err2;
 	}
 
 	return 0;
 
-err2:
-	qede_ptp_disable(edev);
-	ptp->clock = NULL;
 err1:
 	kfree(ptp);
+err2:
 	edev->ptp = NULL;
 
 	return rc;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b61b88cbc0c7..457444894d80 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
 		wmb();
 		writel_relaxed(qdev->small_buf_q_producer_index,
 			       &port_regs->CommonRegs.rxSmallQProducerIndex);
-		mmiowb();
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 0c443ea98479..374a4d4371f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
 	u16 board_type;
 	u16 supported_type;
 
-	u16 link_speed;
+	u32 link_speed;
 	u16 link_duplex;
 	u16 link_autoneg;
 	u16 module_type;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 3e71b65a9546..ad7c5eb8a3b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
 static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 {
 	writel(val, addr);
-	mmiowb();
 }
 
 /*
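Most of the deletions in this commit follow one pattern: an mmiowb() after an MMIO write, just before a lock is dropped, as the removed qede comments spell out. The barrier could go away because spin_unlock() on the architectures that needed it now provides that ordering itself. The underlying principle, that writes made before a release operation are visible to whoever acquires after it, can only be sketched for ordinary memory in userspace, but the shape is the same. A C11-atomics illustration with invented names; this is an analogy, not how MMIO posting actually works:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int doorbell_shadow;	/* stands in for the device write */
static atomic_int lock_word;	/* stands in for the spinlock */

static void *producer(void *unused)
{
	(void)unused;
	doorbell_shadow = 42;	/* "MMIO" write inside the critical section */
	/* Release store, the moral equivalent of spin_unlock(): everything
	 * above is ordered before it. */
	atomic_store_explicit(&lock_word, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	/* Acquire loop, the moral equivalent of the next spin_lock(). */
	while (!atomic_load_explicit(&lock_word, memory_order_acquire))
		;
	printf("observed %d after acquire\n", doorbell_shadow);	/* always 42 */
	pthread_join(t, NULL);
	return 0;
}

Once the unlock itself guarantees this ordering for MMIO as well, a separate mmiowb() before every unlock is redundant, which is why it can be deleted driver by driver here.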
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 07e1c623048e..6cae33072496 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	wmb();
 
 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
-	mmiowb();
 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 		     "tx queued, slot %d, len %d\n",
 		     tx_ring->prod_idx, skb->len);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 19efa88f3f02..ed651dde6ef9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
 #include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 
@@ -7352,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
+	/* Disable ASPM completely as that cause random device stop working
+	 * problems as well as full system hangs for some PCIe devices users.
+	 */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	rc = pcim_enable_device(pdev);
 	if (rc < 0) {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8154b38c08f7..316b47741d3f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -728,7 +728,6 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
 
 	spin_lock(&priv->lock);
 	ravb_emac_interrupt_unlocked(ndev);
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return IRQ_HANDLED;
 }
@@ -848,7 +847,6 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -881,7 +879,6 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
 		result = IRQ_HANDLED;
 	}
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -898,7 +895,6 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
 	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
 
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -943,7 +939,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
 			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
-			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
 		}
 	}
@@ -959,7 +954,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			ravb_write(ndev, mask, RIE0);
 			ravb_write(ndev, mask, TIE);
 		}
-		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Receive error message handling */
@@ -1008,7 +1002,6 @@ static void ravb_adjust_link(struct net_device *ndev)
 	if (priv->no_avb_link && phydev->link)
 		ravb_rcv_snd_enable(ndev);
 
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (new_state && netif_msg_link(priv))
@@ -1601,7 +1594,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		netif_stop_subqueue(ndev, q);
 
 exit:
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
@@ -1673,7 +1665,6 @@ static void ravb_set_rx_mode(struct net_device *ndev)
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_modify(ndev, ECMR, ECMR_PRM,
 		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index dce2a40a31e3..9a42580693cb 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -196,7 +196,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
 		ravb_write(ndev, GIE_PTCS, GIE);
 	else
 		ravb_write(ndev, GID_PTCD, GID);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
@@ -259,7 +258,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
 		else
 			ravb_write(ndev, GID_PTMD0, GID);
 	}
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return error;
@@ -331,7 +329,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e33af371b169..ed30aebdb941 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2010,7 +2010,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
 		sh_eth_rcv_snd_enable(ndev);
 
-	mmiowb();
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
 	if (new_state && netif_msg_link(mdp))
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
index 7085ee1d5e2b..c3577643fbda 100644
--- a/drivers/net/ethernet/sfc/falcon/io.h
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
 	_ef4_writed(efx, value->u32[2], reg + 8);
 	_ef4_writed(efx, value->u32[3], reg + 12);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
@@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
 	__raw_writel((__force u32)value->u32[0], membase + addr);
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 89563170af52..2774a10f44e9 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
 	_efx_writed(efx, value->u32[2], reg + 8);
 	_efx_writed(efx, value->u32[3], reg + 12);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
@@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
 	__raw_writel((__force u32)value->u32[0], membase + addr);
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c07fd594fe71..02b3962b0e63 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -251,7 +251,6 @@ enum PMConfigBits {
  * use of mdelay() at _sc92031_reset.
  * Functions prefixed with _sc92031_ must be called with the lock held;
  * functions prefixed with sc92031_ must be called without the lock held.
- * Use mmiowb() before unlocking if the hardware was written to.
  */
 
 /* Locking rules for the interrupt:
@@ -361,7 +360,6 @@ static void sc92031_disable_interrupts(struct net_device *dev)
 	/* stop interrupts */
 	iowrite32(0, port_base + IntrMask);
 	_sc92031_dummy_read(port_base);
-	mmiowb();
 
 	/* wait for any concurrent interrupt/tasklet to finish */
 	synchronize_irq(priv->pdev->irq);
@@ -379,7 +377,6 @@ static void sc92031_enable_interrupts(struct net_device *dev)
 	wmb();
 
 	iowrite32(IntrBits, port_base + IntrMask);
-	mmiowb();
 }
 
 static void _sc92031_disable_tx_rx(struct net_device *dev)
@@ -867,7 +864,6 @@ out:
 	rmb();
 
 	iowrite32(intr_mask, port_base + IntrMask);
-	mmiowb();
 
 	spin_unlock(&priv->lock);
 }
@@ -901,7 +897,6 @@ out_none:
 	rmb();
 
 	iowrite32(intr_mask, port_base + IntrMask);
-	mmiowb();
 
 	return IRQ_NONE;
 }
@@ -978,7 +973,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
 	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
 		  port_base + TxAddr0 + entry * 4);
 	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
-	mmiowb();
 
 	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
 		netif_stop_queue(dev);
@@ -1024,7 +1018,6 @@ static int sc92031_open(struct net_device *dev)
 	spin_lock_bh(&priv->lock);
 
 	_sc92031_reset(dev);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 	sc92031_enable_interrupts(dev);
@@ -1060,7 +1053,6 @@ static int sc92031_stop(struct net_device *dev)
 
 	_sc92031_disable_tx_rx(dev);
 	_sc92031_tx_clear(dev);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 
@@ -1081,7 +1073,6 @@ static void sc92031_set_multicast_list(struct net_device *dev)
 
 	_sc92031_set_mar(dev);
 	_sc92031_set_rx_config(dev);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 }
@@ -1098,7 +1089,6 @@ static void sc92031_tx_timeout(struct net_device *dev)
 		priv->tx_timeouts++;
 
 	_sc92031_reset(dev);
-	mmiowb();
 
 	spin_unlock(&priv->lock);
 
@@ -1140,7 +1130,6 @@ sc92031_ethtool_get_link_ksettings(struct net_device *dev,
 	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
 	_sc92031_mii_scan(port_base);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 
@@ -1311,7 +1300,6 @@ static int sc92031_ethtool_set_wol(struct net_device *dev,
 	priv->pm_config = pm_config;
 	iowrite32(pm_config, port_base + PMConfig);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 
@@ -1337,7 +1325,6 @@ static int sc92031_ethtool_nway_reset(struct net_device *dev)
 
 out:
 	_sc92031_mii_scan(port_base);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 
@@ -1530,7 +1517,6 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	_sc92031_disable_tx_rx(dev);
 	_sc92031_tx_clear(dev);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 
@@ -1555,7 +1541,6 @@ static int sc92031_resume(struct pci_dev *pdev)
 	spin_lock_bh(&priv->lock);
 
 	_sc92031_reset(dev);
-	mmiowb();
 
 	spin_unlock_bh(&priv->lock);
 	sc92031_enable_interrupts(dev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index a18149720aa2..cba5881b2746 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
 }
 
 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
-				  dma_addr_t *dma_handle, u16 *desc_len)
+				  dma_addr_t *dma_handle, u16 *desc_len,
+				  bool napi)
 {
 	size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 
 	total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
 
-	buf = napi_alloc_frag(total_len);
+	buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
 	if (!buf)
 		return NULL;
 
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		/* allocate a fresh buffer and map it to the hardware.
 		 * This will eventually replace the old buffer in the hardware
 		 */
-		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+						true);
 		if (unlikely(!buf_addr))
 			break;
 
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
 		void *buf;
 		u16 len;
 
-		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+		buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+					   false);
 		if (!buf) {
 			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
 			goto err_out;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 062a600fa5a7..21428537e231 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
 	 */
 	dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
 							"stm32_pwr_wakeup");
+	if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
 	if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
 		err = device_init_wakeup(&pdev->dev, true);
 		if (err) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index b7dd4e3c760d..6d690678c20e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -140,7 +140,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 		p->des0 |= cpu_to_le32(RDES0_OWN);
 
 	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
-	p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
+	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a26e36dbb5df..48712437d0da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2616,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
 	u32 chan;
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
 	    priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -4303,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
 	if (ret)
 		goto error_hw_init;
 
+	stmmac_check_ether_addr(priv);
+
 	/* Configure real RX and TX queues */
 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
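The one-character norm_desc.c fix above matters because the RX descriptor's buffer-1 size lives in a narrow bitfield: masking the unclamped bfsize can silently truncate it (worst case to zero), while masking the clamped bfsize1 never loses bits. A tiny standalone demonstration; the 11-bit mask width here is an assumption for illustration, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define RDES1_BUFFER1_SIZE_MASK 0x7ffu	/* assumed 11-bit size field */
#define BUF_SIZE_2KiB 2048u

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	uint32_t bfsize = 4096;	/* ring configured for larger buffers */
	uint32_t bfsize1 = min_u32(bfsize, BUF_SIZE_2KiB - 1);

	/* Buggy form: masking the unclamped value wraps to 0 here. */
	printf("des1 from bfsize : %#x\n",
	       (unsigned)(bfsize & RDES1_BUFFER1_SIZE_MASK));
	/* Fixed form: clamp first, then the mask cannot truncate. */
	printf("des1 from bfsize1: %#x\n",
	       (unsigned)(bfsize1 & RDES1_BUFFER1_SIZE_MASK));
	return 0;
}

With a 4 KiB bfsize the buggy expression programs a buffer size of 0 into the descriptor, while the clamped value yields the intended 2047-byte maximum.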
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d819e8eaba12..26db6aa002d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 		},
 		.driver_data = (void *)&galileo_stmmac_dmi_data,
 	},
+	/*
+	 * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+	 * has only one pci network device while other asset tags are
+	 * for IOT2040 which has two.
+	 */
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-					"6ES7647-0AA00-1YA2"),
 		},
 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
 	},
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 33949248c829..ab55416a10fa 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -571,7 +571,6 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 	if (rp->quirks & rqStatusWBRace)
 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
 	iowrite16(mask, ioaddr + IntrStatus);
-	mmiowb();
 }
 
 /*
@@ -863,7 +862,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 		iowrite16(enable_mask, ioaddr + IntrEnable);
-		mmiowb();
 	}
 	return work_done;
 }
@@ -1893,7 +1891,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 static void rhine_irq_disable(struct rhine_private *rp)
 {
 	iowrite16(0x0000, rp->base + IntrEnable);
-	mmiowb();
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index d8ba512f166a..1713c2d2dccf 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -219,7 +219,6 @@ static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
 static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
 {
 	__w5100_write_direct(ndev, addr, data);
-	mmiowb();
 
 	return 0;
 }
@@ -236,7 +235,6 @@ static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
 {
 	__w5100_write_direct(ndev, addr, data >> 8);
 	__w5100_write_direct(ndev, addr + 1, data);
-	mmiowb();
 
 	return 0;
 }
@@ -260,8 +258,6 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
 	for (i = 0; i < len; i++, addr++)
 		__w5100_write_direct(ndev, addr, *buf++);
 
-	mmiowb();
-
 	return 0;
 }
 
@@ -375,7 +371,6 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
 	for (i = 0; i < len; i++)
 		*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
 
-	mmiowb();
 	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
 	return 0;
@@ -394,7 +389,6 @@ static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
 	for (i = 0; i < len; i++)
 		__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
 
-	mmiowb();
 	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
 	return 0;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f9da5d6172e3..3f03eecc0479 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -141,7 +141,6 @@ static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
 
 	spin_lock_irqsave(&priv->reg_lock, flags);
 	w5300_write_direct(priv, W5300_IDM_AR, addr);
-	mmiowb();
 	data = w5300_read_direct(priv, W5300_IDM_DR);
 	spin_unlock_irqrestore(&priv->reg_lock, flags);
 
@@ -154,9 +153,7 @@ static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
 
 	spin_lock_irqsave(&priv->reg_lock, flags);
 	w5300_write_direct(priv, W5300_IDM_AR, addr);
-	mmiowb();
 	w5300_write_direct(priv, W5300_IDM_DR, data);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->reg_lock, flags);
 }
 
@@ -192,7 +189,6 @@ static int w5300_command(struct w5300_priv *priv, u16 cmd)
 	unsigned long timeout = jiffies + msecs_to_jiffies(100);
 
 	w5300_write(priv, W5300_S0_CR, cmd);
-	mmiowb();
 
 	while (w5300_read(priv, W5300_S0_CR) != 0) {
 		if (time_after(jiffies, timeout))
@@ -241,18 +237,15 @@ static void w5300_write_macaddr(struct w5300_priv *priv)
 	w5300_write(priv, W5300_SHARH,
 		    ndev->dev_addr[4] << 8 |
 		    ndev->dev_addr[5]);
-	mmiowb();
 }
 
 static void w5300_hw_reset(struct w5300_priv *priv)
 {
 	w5300_write_direct(priv, W5300_MR, MR_RST);
-	mmiowb();
 	mdelay(5);
 	w5300_write_direct(priv, W5300_MR, priv->indirect ?
 				  MR_WDF(7) | MR_PB | MR_IND :
 				  MR_WDF(7) | MR_PB);
-	mmiowb();
 
 	w5300_write(priv, W5300_IMR, 0);
 	w5300_write_macaddr(priv);
@@ -264,24 +257,20 @@ static void w5300_hw_reset(struct w5300_priv *priv)
 	w5300_write32(priv, W5300_TMSRL, 64 << 24);
 	w5300_write32(priv, W5300_TMSRH, 0);
 	w5300_write(priv, W5300_MTYPE, 0x00ff);
-	mmiowb();
 }
 
 static void w5300_hw_start(struct w5300_priv *priv)
 {
 	w5300_write(priv, W5300_S0_MR, priv->promisc ?
 			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
-	mmiowb();
 	w5300_command(priv, S0_CR_OPEN);
 	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
 	w5300_write(priv, W5300_IMR, IR_S0);
-	mmiowb();
 }
 
 static void w5300_hw_close(struct w5300_priv *priv)
 {
 	w5300_write(priv, W5300_IMR, 0);
-	mmiowb();
 	w5300_command(priv, S0_CR_CLOSE);
 }
 
@@ -372,7 +361,6 @@ static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	w5300_write_frame(priv, skb->data, skb->len);
-	mmiowb();
 	ndev->stats.tx_packets++;
 	ndev->stats.tx_bytes += skb->len;
 	dev_kfree_skb(skb);
@@ -419,7 +407,6 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
 	if (rx_count < budget) {
 		napi_complete_done(napi, rx_count);
 		w5300_write(priv, W5300_IMR, IR_S0);
-		mmiowb();
 	}
 
 	return rx_count;
@@ -434,7 +421,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
 	if (!ir)
 		return IRQ_NONE;
 	w5300_write(priv, W5300_S0_IR, ir);
-	mmiowb();
 
 	if (ir & S0_IR_SENDOK) {
 		netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -444,7 +430,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
 	if (ir & S0_IR_RECV) {
 		if (napi_schedule_prep(&priv->napi)) {
 			w5300_write(priv, W5300_IMR, 0);
-			mmiowb();
 			__napi_schedule(&priv->napi);
 		}
 	}