Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/falcon/io.h    2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/tx.c    4
-rw-r--r--  drivers/net/ethernet/sfc/io.h           2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c    1
-rw-r--r--  drivers/net/ethernet/sfc/tx.c          12
5 files changed, 8 insertions, 13 deletions
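
The xmit_more hunks below replace reads of the skb->xmit_more field with the
netdev_xmit_more() helper provided by the networking core. A minimal sketch of
the resulting driver-side pattern, not taken from this patch: my_start_xmit()
and my_push_ring() are hypothetical names standing in for the driver's
ndo_start_xmit handler and its doorbell helper (efx_nic_push_buffers() in sfc).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical doorbell helper: a real driver would write the TX ring
 * tail pointer to the NIC here.
 */
static void my_push_ring(struct net_device *dev)
{
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... map the skb and fill TX descriptors here ... */
	netdev_tx_sent_queue(txq, skb->len);

	/* The xmit_more hint now comes from the core via netdev_xmit_more()
	 * instead of the removed skb->xmit_more field: ring the doorbell
	 * only when no further packets are pending or the queue is stopped.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		my_push_ring(dev);

	return NETDEV_TX_OK;
}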
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
index 7085ee1d5e2b..c3577643fbda 100644
--- a/drivers/net/ethernet/sfc/falcon/io.h
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
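
The mmiowb() removals above (and in io.h below) rely on the mmiowb() rework:
spin_unlock() now issues the equivalent I/O write barrier on architectures
that need one, so a lock-protected MMIO sequence no longer needs an explicit
mmiowb() before releasing the lock. A sketch of the resulting pattern, with
hypothetical my_nic/my_writeq names rather than the sfc code itself:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_nic {
	void __iomem *membase;
	spinlock_t biu_lock;
};

static void my_writeq(struct my_nic *nic, u64 value, unsigned int addr)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->biu_lock, flags);
	__raw_writel((u32)value, nic->membase + addr);
	__raw_writel((u32)(value >> 32), nic->membase + addr + 4);
	/* mmiowb() used to go here; the unlock below now orders these
	 * MMIO writes against writes done under the lock by other CPUs.
	 */
	spin_unlock_irqrestore(&nic->biu_lock, flags);
}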
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 3409bbf5b19f..c5059f456f37 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
ef4_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = netdev_xmit_more();
}
tx_queue->tx_packets++;
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 89563170af52..2774a10f44e9 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9382bb0b4d5a..a4bbfebe3d64 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -342,6 +342,7 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
break;
default:
WARN_ON(1);
+ /* Fall through */
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 06c8f282263f..e182055ec2eb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
next = skb->next;
skb->next = NULL;
- if (next)
- skb->xmit_more = true;
efx_enqueue_skb(tx_queue, skb);
skb = next;
}
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
- bool xmit_more = skb->xmit_more;
+ bool xmit_more = netdev_xmit_more();
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (rc)
goto err;
#ifdef EFX_USE_PIO
- } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+ } else if (skb_len <= efx_piobuf_size && !xmit_more &&
efx_nic_may_tx_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
- /* There could be packets left on the partner queue if those
- * SKBs had skb->xmit_more set. If we do not push those they
+ /* There could be packets left on the partner queue if
+ * xmit_more was set. If we do not push those they
* could be left for a long time and cause a netdev watchdog.
*/
if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
efx_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = xmit_more;
}
if (segments) {