author     Jason Wang <jasowang@redhat.com>      2016-12-23 22:37:26 +0800
committer  David S. Miller <davem@davemloft.net> 2016-12-23 13:48:54 -0500
commit     56a86f84b8332afe8c6fcb4b09d09d9bf094e2db (patch)
tree       ae161601bc78368c4dcbc2adc6c9d45932f33c1b /drivers/net/virtio_net.c
parent     virtio-net: correctly xmit linearized page on XDP_TX (diff)
virtio-net: fix page miscount during XDP linearizing
We don't put the page during linearizing, which would cause a leak when the buffer is transmitted through XDP_TX or when the packet exceeds PAGE_SIZE. Fix this by putting the page accordingly. Also decrease the number of buffers during linearizing so that the caller can free the remaining buffers correctly when the packet exceeds PAGE_SIZE. With this patch, we no longer hit OOM after linearizing a huge number of packets.

Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
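For illustration, the bookkeeping contract the patch establishes can be sketched outside the kernel: each source buffer's page reference is dropped exactly once after its data has been copied (or on the oversized-packet path), and the caller's buffer count is decremented as buffers are consumed, so a failing caller knows how many buffers it still has to release. The user-space C sketch below uses simplified stand-ins (a struct page with a plain refcount, put_page(), struct frag, linearize()); it models the pattern only and is not the kernel code.

/*
 * Minimal user-space sketch of the refcount bookkeeping the fix establishes.
 * struct page, put_page(), struct frag and linearize() are simplified
 * stand-ins for illustration, not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page {
        int refcount;
        char data[PAGE_SIZE];
};

static struct page *alloc_refcounted_page(void)
{
        struct page *p = calloc(1, sizeof(*p));

        if (p)
                p->refcount = 1;
        return p;
}

static void put_page(struct page *p)
{
        if (--p->refcount == 0)
                free(p);
}

/* One received fragment: a refcounted page plus the length of data in it. */
struct frag {
        struct page *page;
        unsigned int len;
};

/*
 * Copy fragments into a fresh page, consuming them front to back.  Each
 * consumed fragment is put exactly once (after the copy, or on the overflow
 * path), and *num_buf is decremented as fragments are consumed so a failing
 * caller knows how many trailing fragments it still owns.
 */
static struct page *linearize(struct frag *frags, unsigned int total,
                              unsigned int *num_buf)
{
        struct page *page = alloc_refcounted_page();
        unsigned int page_off = 0;
        unsigned int i;

        if (!page)
                return NULL;

        for (i = 0; i < total; i++) {
                struct frag *f = &frags[i];

                (*num_buf)--;              /* this fragment is ours now */
                if (page_off + f->len > PAGE_SIZE) {
                        put_page(f->page); /* don't leak the oversized fragment */
                        put_page(page);
                        return NULL;       /* caller puts the remaining *num_buf frags */
                }
                memcpy(page->data + page_off, f->page->data, f->len);
                page_off += f->len;
                put_page(f->page);         /* data copied out, drop the reference */
        }
        return page;
}

int main(void)
{
        unsigned int total = 2, num_buf = 2, i;
        struct frag frags[2];
        struct page *out;

        for (i = 0; i < total; i++) {
                frags[i].page = alloc_refcounted_page();
                frags[i].len = 1500;
                memset(frags[i].page->data, 'a' + i, frags[i].len);
        }

        out = linearize(frags, total, &num_buf);
        if (out) {
                printf("linearized %u fragments into one page\n", total);
                put_page(out);
        } else {
                /* free whatever linearize() did not consume */
                for (i = total - num_buf; i < total; i++)
                        put_page(frags[i].page);
        }
        return 0;
}

On the failure path, main() performs the caller-side cleanup that the decremented count makes possible, mirroring what receive_mergeable() does with the updated num_buf.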
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--   drivers/net/virtio_net.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fe4562d395e3..58ad40e17a74 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -483,7 +483,7 @@ xdp_xmit:
  * anymore.
  */
 static struct page *xdp_linearize_page(struct receive_queue *rq,
-                                       u16 num_buf,
+                                       u16 *num_buf,
                                        struct page *p,
                                        int offset,
                                        unsigned int *len)
@@ -497,7 +497,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
         memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
         page_off += *len;
 
-        while (--num_buf) {
+        while (--*num_buf) {
                 unsigned int buflen;
                 unsigned long ctx;
                 void *buf;
@@ -507,19 +507,22 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                 if (unlikely(!ctx))
                         goto err_buf;
 
+                buf = mergeable_ctx_to_buf_address(ctx);
+                p = virt_to_head_page(buf);
+                off = buf - page_address(p);
+
                 /* guard against a misconfigured or uncooperative backend that
                  * is sending packet larger than the MTU.
                  */
-                if ((page_off + buflen) > PAGE_SIZE)
+                if ((page_off + buflen) > PAGE_SIZE) {
+                        put_page(p);
                         goto err_buf;
-
-                buf = mergeable_ctx_to_buf_address(ctx);
-                p = virt_to_head_page(buf);
-                off = buf - page_address(p);
+                }
 
                 memcpy(page_address(page) + page_off,
                        page_address(p) + off, buflen);
                 page_off += buflen;
+                put_page(p);
         }
 
         *len = page_off;
@@ -555,7 +558,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
         /* This happens when rx buffer size is underestimated */
         if (unlikely(num_buf > 1)) {
                 /* linearize data for XDP */
-                xdp_page = xdp_linearize_page(rq, num_buf,
+                xdp_page = xdp_linearize_page(rq, &num_buf,
                                               page, offset, &len);
                 if (!xdp_page)
                         goto err_xdp;