author     David S. Miller <davem@davemloft.net>  2014-03-31 16:56:43 -0400
committer  David S. Miller <davem@davemloft.net>  2014-03-31 16:56:43 -0400
commit     0b70195e0c3206103be991e196c26fcf168d0334 (patch)
tree       61902c09bfa14034a82c151f46a4616a2f013653
parent     Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next (diff)
parent     at86rf230: mask irq's before deregister device (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/xen-netback/netback.c

A bug fix overlapped with changing how the netback SKB control
block is implemented.

Signed-off-by: David S. Miller <davem@davemloft.net>
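
The conflict note refers to how netback keeps per-packet bookkeeping in the skb
control block: state is stashed in skb->cb and read back through a casting
macro. A minimal userspace sketch of that pattern, assuming an illustrative
control-block layout (the real xenvif_rx_cb in netback.c differs):

    #include <assert.h>
    #include <string.h>

    /* Reduced stand-in for the kernel's struct sk_buff: only the 48-byte
     * control-block scratch area matters for this illustration. */
    struct sk_buff {
    	char cb[48];
    };

    /* Illustrative per-skb bookkeeping; not the real xenvif_rx_cb layout. */
    struct xenvif_rx_cb {
    	int meta_slots_used;
    };

    #define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)&(skb)->cb[0])

    int main(void)
    {
    	struct sk_buff skb;

    	memset(&skb, 0, sizeof(skb));
    	XENVIF_RX_CB(&skb)->meta_slots_used = 3;
    	assert(XENVIF_RX_CB(&skb)->meta_slots_used == 3);
    	return 0;
    }
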
-rw-r--r--  drivers/net/ieee802154/at86rf230.c |  2
-rw-r--r--  drivers/net/xen-netback/netback.c  | 32
2 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e8004ef73bc1..89417ac41083 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1249,6 +1249,8 @@ static int at86rf230_remove(struct spi_device *spi)
 	struct at86rf230_local *lp = spi_get_drvdata(spi);
 	struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
+	/* mask all at86rf230 irq's */
+	at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
 	ieee802154_unregister_device(lp->dev);
 	free_irq(spi->irq, lp);
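
The two added lines close a window in which the radio could still raise an
interrupt while the device was being torn down. A minimal userspace model of
the ordering, using made-up names (fake_dev, fake_remove, fake_irq_handler)
rather than the driver's real API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up stand-in for the radio driver state; not the real driver API. */
    struct fake_dev {
    	bool irq_mask;		/* true once all interrupt sources are masked */
    	bool registered;	/* stack state the irq handler relies on */
    };

    static void fake_irq_handler(struct fake_dev *dev)
    {
    	if (dev->irq_mask)
    		return;				/* masked: nothing to do */
    	if (!dev->registered)
    		printf("BUG: irq handled after unregister\n");
    }

    static void fake_remove(struct fake_dev *dev)
    {
    	dev->irq_mask = true;		/* 1. mask all device irqs */
    	dev->registered = false;	/* 2. unregister from the stack */
    	/* 3. the free_irq() equivalent would follow here */
    }

    int main(void)
    {
    	struct fake_dev dev = { .irq_mask = false, .registered = true };

    	fake_remove(&dev);
    	fake_irq_handler(&dev);		/* a late irq now returns harmlessly */
    	return 0;
    }
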
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cb784fe5220c..ae34f5fc7fbc 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -191,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
 	 * into multiple copies tend to give large frags their
 	 * own buffers as before.
 	 */
-	if ((offset + size > MAX_BUFFER_OFFSET) &&
-	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
+	BUG_ON(size > MAX_BUFFER_OFFSET);
+	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
 		return true;
 
 	return false;
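
Restated outside the kernel, the predicate now reads as below; MAX_BUFFER_OFFSET
is assumed to equal PAGE_SIZE for the example. Callers never pass a chunk larger
than one buffer, which is exactly the invariant the new BUG_ON asserts, so the
old size <= MAX_BUFFER_OFFSET test was redundant and is dropped:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE		4096UL
    #define MAX_BUFFER_OFFSET	PAGE_SIZE	/* assumption for the example */

    static bool start_new_rx_buffer(int offset, unsigned long size, int head)
    {
    	assert(size <= MAX_BUFFER_OFFSET);	/* stands in for the BUG_ON */
    	return (offset + size > MAX_BUFFER_OFFSET) && offset && !head;
    }

    int main(void)
    {
    	/* A 2 KiB chunk starting 3 KiB into the buffer would overflow it,
    	 * so a new buffer is started (unless this is the head). */
    	printf("%d\n", start_new_rx_buffer(3072, 2048, 0));	/* prints 1 */
    	printf("%d\n", start_new_rx_buffer(0, 2048, 0));	/* prints 0 */
    	return 0;
    }
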
@@ -511,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
 		RING_IDX max_slots_needed;
+		RING_IDX old_req_cons;
+		RING_IDX ring_slots_used;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -522,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
 						PAGE_SIZE);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			unsigned int size;
+			unsigned int offset;
+
 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+			offset = skb_shinfo(skb)->frags[i].page_offset;
+
+			/* For a worse-case estimate we need to factor in
+			 * the fragment page offset as this will affect the
+			 * number of times xenvif_gop_frag_copy() will
+			 * call start_new_rx_buffer().
+			 */
+			max_slots_needed += DIV_ROUND_UP(offset + size,
+							 PAGE_SIZE);
 		}
+
+		/* To avoid the estimate becoming too pessimal for some
+		 * frontends that limit posted rx requests, cap the estimate
+		 * at MAX_SKB_FRAGS.
+		 */
+		if (max_slots_needed > MAX_SKB_FRAGS)
+			max_slots_needed = MAX_SKB_FRAGS;
+
+		/* We may need one more slot for GSO metadata */
 		if (skb_is_gso(skb) &&
 		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
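
A worked example of why the fragment's page offset matters for the worst-case
slot estimate, and of the MAX_SKB_FRAGS cap; MAX_SKB_FRAGS is given its usual
4 KiB-page value of 17 here as an assumption:

    #include <stdio.h>

    #define PAGE_SIZE	4096u
    #define MAX_SKB_FRAGS	17u	/* usual value with 4 KiB pages; assumed */
    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int size = 100, offset = PAGE_SIZE - 50;
    	unsigned int max_slots_needed = 40;	/* pretend running total */

    	/* Rounding up the size alone says one slot, but a 100-byte fragment
    	 * starting 50 bytes before the end of a page straddles two pages. */
    	printf("old estimate: %u slot(s)\n", DIV_ROUND_UP(size, PAGE_SIZE));
    	printf("new estimate: %u slot(s)\n",
    	       DIV_ROUND_UP(offset + size, PAGE_SIZE));

    	/* The per-skb total is then capped so frontends that post few rx
    	 * requests are not starved by an overly pessimistic estimate. */
    	if (max_slots_needed > MAX_SKB_FRAGS)
    		max_slots_needed = MAX_SKB_FRAGS;
    	printf("capped total: %u\n", max_slots_needed);
    	return 0;
    }
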
@@ -539,8 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
 		} else
 			vif->rx_last_skb_slots = 0;
 
+		old_req_cons = vif->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
-		BUG_ON(XENVIF_RX_CB(skb)->meta_slots_used > max_slots_needed);
+		ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+		BUG_ON(ring_slots_used > max_slots_needed);
 
 		__skb_queue_tail(&rxq, skb);
 	}
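
The last hunk changes what the sanity check measures: the estimate is expressed
in ring slots, so the check now compares the delta of the ring's request-consumer
index rather than the meta slot count returned by xenvif_gop_skb(). A minimal
model under made-up names (fake_rx_ring, fake_gop_skb), not the real xen shared
ring API:

    #include <assert.h>

    /* Made-up stand-in for the shared rx ring; only the request-consumer
     * index matters for this illustration. */
    struct fake_rx_ring {
    	unsigned int req_cons;
    };

    /* Pretend to build the grant-copy operations for one skb, consuming
     * "slots" ring requests and returning the meta slots it produced. */
    static unsigned int fake_gop_skb(struct fake_rx_ring *rx, unsigned int slots)
    {
    	rx->req_cons += slots;
    	return slots;
    }

    int main(void)
    {
    	struct fake_rx_ring rx = { .req_cons = 10 };
    	unsigned int max_slots_needed = 4;
    	unsigned int old_req_cons, ring_slots_used;

    	old_req_cons = rx.req_cons;
    	fake_gop_skb(&rx, 3);
    	ring_slots_used = rx.req_cons - old_req_cons;

    	/* Mirrors the new BUG_ON: the ring slots actually consumed, not the
    	 * meta slots recorded in the control block, must fit the estimate. */
    	assert(ring_slots_used <= max_slots_needed);
    	return 0;
    }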