author     Paul Durrant <Paul.Durrant@citrix.com>        2014-01-08 12:41:58 +0000
committer  David S. Miller <davem@davemloft.net>         2014-01-09 23:05:46 -0500
commit     11b57f90257c1d6a91cee720151b69e0c2020cf6 (patch)
tree       6da4cdb6bbaf71ab38ad9a133e2391aa4910f32a /drivers/net/xen-netback/netback.c
parent     cxgb4: Changed FW check version to match FW binary version (diff)
xen-netback: stop vif thread spinning if frontend is unresponsive
The recent patch to improve guest receive side flow control (ca2f09f2) had a slight flaw in the wait condition for the vif thread, in that any remaining skbs in the guest receive side netback internal queue would prevent the thread from sleeping. An unresponsive frontend can lead to a permanently non-empty internal queue and thus the thread will spin. In this case the thread should really sleep until the frontend becomes responsive again.

This patch adds an extra flag to the vif which is set if the shared ring is full and cleared when skbs are drained into the shared ring. Thus, if the thread runs, finds the shared ring full and can make no progress, the flag remains set. If the flag remains set then the thread will sleep, regardless of a non-empty queue, until the next event from the frontend.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
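For context, below is a condensed sketch (not part of this patch) of the per-vif kernel thread loop in netback.c that evaluates the wait condition described above. It is illustrative only: error handling is elided and the code is paraphrased from the 3.13-era driver, with the new rx_queue_stopped flag assumed to be a field added to struct xenvif by the accompanying header change.

int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		/* Sleep until there is receive-side work (or we must stop).
		 * Before this patch a non-empty rx_queue alone satisfied
		 * rx_work_todo(), so a full shared ring left by an
		 * unresponsive frontend kept the thread spinning here.
		 */
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* Push queued skbs into the shared ring; if the ring is
		 * still full and nothing was copied, xenvif_rx_action()
		 * now sets vif->rx_queue_stopped so the next wait above
		 * can sleep despite the non-empty queue.
		 */
		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		vif->rx_event = false;
		cond_resched();
	}

	return 0;
}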
Diffstat (limited to '')
-rw-r--r--  drivers/net/xen-netback/netback.c | 14 +++++++++-----
1 files changed, 9 insertions, 5 deletions
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4f81ac0e2f0a..27385639b6e5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	int ret;
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
-	int need_to_notify = 0;
+	bool need_to_notify = false;
+	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -508,7 +509,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 		/* If the skb may not fit then bail out now */
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
-			need_to_notify = 1;
+			need_to_notify = true;
+			ring_full = true;
 			break;
 		}
@@ -521,6 +523,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
+	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
+
 	if (!npo.copy_prod)
 		goto done;
@@ -592,8 +596,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
-		if (ret)
-			need_to_notify = 1;
+		need_to_notify |= !!ret;
 
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
@@ -1724,7 +1727,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
+	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
+		vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
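The stall is broken by the next event from the frontend: the receive-side interrupt handler (in interface.c, not part of this diff) notes the event and wakes the thread, which makes rx_work_todo() true irrespective of rx_queue_stopped. A rough sketch, paraphrased rather than quoted from the tree, under the assumption that the handler and xenvif_kick_thread() behave as in the 3.13-era driver:

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif *vif = dev_id;

	/* Record that the frontend has become responsive again and kick
	 * the dedicated thread so it re-evaluates rx_work_todo().
	 */
	vif->rx_event = true;
	xenvif_kick_thread(vif);

	return IRQ_HANDLED;
}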