author		Julian Wiedmann <jwi@linux.ibm.com>	2019-04-17 18:17:35 +0200
committer	David S. Miller <davem@davemloft.net>	2019-04-17 10:33:59 -0700
commit		54a50941b7db8726732919daa859b931a9f496e2
tree		8d34ae098a862a52ff80102c5344d959e2998c79
parent		s390/qeth: simplify QoS code
s390/qeth: stop/wake TX queues based on their fill level
Current xmit code only stops the txq after attempting to fill an IO buffer
that hasn't been TX-completed yet. In many-connection scenarios, this can
result in frequent rejected TX attempts, requeuing of skbs with
NETDEV_TX_BUSY and extra overhead.

Now that we have a proper 1-to-1 relation between stack-side txqs and our
HW Queues, overhaul the stop/wake logic so that the xmit code stops the
txq as needed.
Given that we might map multiple skbs into a single buffer, it's crucial
to ensure that the queue always provides an _entirely_ empty IO buffer.
Otherwise large skbs (eg TSO) might not fit into the last available buffer.
So whenever qeth_do_send_packet() first utilizes an _empty_ buffer, it
updates & checks the used_buffers count.

This now ensures that an skb passed to qeth_xmit() can always be mapped
into an IO buffer, so remove all of the -EBUSY roll-back handling in the
TX path. We preserve the minimal safety-checks ("Is this IO buffer really
available?"), just in case some nasty future bug ever attempts to corrupt
an in-use buffer.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
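The pattern described above is the usual fill-level based flow control for a
multiqueue netdev. The following is a minimal illustrative sketch only, not
the qeth code: the struct and function names (my_txq, my_xmit,
my_tx_completion, nr_buffers) are hypothetical, while netif_stop_subqueue()
and netif_wake_subqueue() are the standard kernel per-txq helpers.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct my_txq {
		unsigned int used_buffers;	/* buffers handed to HW, not yet TX-completed */
		unsigned int nr_buffers;	/* total IO buffers in this queue */
	};

	static netdev_tx_t my_xmit(struct net_device *dev, struct sk_buff *skb,
				   struct my_txq *q, u16 txq_idx)
	{
		/* ... map skb into the current IO buffer (omitted) ... */

		/*
		 * Stop the txq as soon as the last empty buffer is taken, so
		 * the next skb (possibly a large TSO skb that needs a fully
		 * empty buffer) is never rejected with NETDEV_TX_BUSY.
		 */
		if (++q->used_buffers >= q->nr_buffers)
			netif_stop_subqueue(dev, txq_idx);

		/* ... flush the buffer to HW (omitted) ... */
		return NETDEV_TX_OK;
	}

	static void my_tx_completion(struct net_device *dev, struct my_txq *q,
				     u16 txq_idx, unsigned int completed)
	{
		q->used_buffers -= completed;

		/* An empty buffer is available again, let the stack resume. */
		if (q->used_buffers < q->nr_buffers)
			netif_wake_subqueue(dev, txq_idx);
	}

With this scheme the xmit path itself never has to return NETDEV_TX_BUSY for
a full queue, which is exactly why the -EBUSY roll-back and the unconditional
stop/wake calls are removed from the L3 path in the diff below.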
Diffstat (limited to 'drivers/s390/net/qeth_l3_main.c')
-rw-r--r--	drivers/s390/net/qeth_l3_main.c	18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 65244da4f415..4c9394105138 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2036,7 +2036,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- unsigned char eth_hdr[ETH_HLEN];
unsigned int hw_hdr_len;
int rc;
@@ -2046,17 +2045,10 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
- skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
- return rc;
+ return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
@@ -2091,8 +2083,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
- netif_stop_subqueue(dev, txq);
-
if (ipv == 4 || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
else
@@ -2102,16 +2092,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_subqueue(dev, txq);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_subqueue(dev, txq);
return NETDEV_TX_OK;
}