author     Juergen Gross <jgross@suse.com>          2018-06-12 08:57:53 +0200
committer  David S. Miller <davem@davemloft.net>    2018-06-12 15:42:38 -0700
commit     57f230ab04d2910a06d17d988f1c4d7586a59113 (patch)
tree       c11ba9e13cf34a683fe4721442e7914c4e234965  /drivers/net/xen-netfront.c
parent     smc: convert to ->poll_mask (diff)
xen/netfront: raise max number of slots in xennet_get_responses()
The max number of slots used in xennet_get_responses() is set to
MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD).

In old kernel-xen MAX_SKB_FRAGS was 18, while nowadays it is 17. This
difference results in frequent "too many slots" messages and in reduced
network throughput for some workloads (a factor of 10 below that of a
kernel-xen based guest).

Replacing MAX_SKB_FRAGS with XEN_NETIF_NR_SLOTS_MIN in the calculation of
the max number of slots to use solves that problem (tests showed no more
"too many slots" messages, and throughput was as high as with the
kernel-xen based guest system).

Replace MAX_SKB_FRAGS - 2 with XEN_NETIF_NR_SLOTS_MIN - 1 in
netfront_tx_slot_available() to make it clearer what is really being
tested, without actually modifying the tested value.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
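For illustration, a minimal sketch of the slot arithmetic behind the fix
(not frontend code; it assumes XEN_NETIF_NR_SLOTS_MIN is 18, as defined in
include/xen/interface/io/netif.h, and MAX_SKB_FRAGS is 17 on current
4 KiB-page kernels):

#include <stdio.h>

/* Stand-ins for the kernel constants discussed in the commit message. */
#define MAX_SKB_FRAGS          17  /* current kernels */
#define XEN_NETIF_NR_SLOTS_MIN 18  /* old kernel-xen frag limit, kept as the protocol minimum */

int main(void)
{
	int small_pkt = 1;  /* (rx->status <= RX_COPY_THRESHOLD) evaluates to 0 or 1 */

	/* Old limit: 17 + 1 = 18 slots; a backend still assuming the
	 * historical 18-frag limit may hand over up to 19 slots and
	 * trip the "too many slots" message. */
	printf("old max slots: %d\n", MAX_SKB_FRAGS + small_pkt);

	/* New limit: 18 + 1 = 19 slots, covering what the backend may
	 * legitimately produce. */
	printf("new max slots: %d\n", XEN_NETIF_NR_SLOTS_MIN + small_pkt);
	return 0;
}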
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 679da1abd73c..922ce0abf5cf 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t)
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
- (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+ (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -790,7 +790,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
grant_ref_t ref = xennet_get_rx_ref(queue, cons);
- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
int slots = 1;
int err = 0;
unsigned long ret;
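For reference, with MAX_SKB_FRAGS equal to 17 and XEN_NETIF_NR_SLOTS_MIN
equal to 18 (assumed values for kernels of this era), the threshold tested
in netfront_tx_slot_available() is numerically unchanged:

	NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2          = NET_TX_RING_SIZE - 19
	NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1 = NET_TX_RING_SIZE - 19

only the intent of the expression becomes clearer.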