Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r-- | drivers/net/xen-netfront.c | 356 |
1 file changed, 224 insertions(+), 132 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 911f43986a8c..9af2b027c19c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
 MODULE_PARM_DESC(max_queues,
		  "Maximum number of queues per virtual interface");
 
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define XENNET_TIMEOUT	(5 * HZ)
 
 static const struct ethtool_ops xennet_ethtool_ops;
@@ -78,8 +82,6 @@ struct netfront_cb {
 
 #define RX_COPY_THRESHOLD 256
 
-#define GRANT_INVALID_REF	0
-
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
@@ -148,6 +150,9 @@ struct netfront_queue {
 	grant_ref_t gref_rx_head;
 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
+	unsigned int rx_rsp_unconsumed;
+	spinlock_t rx_cons_lock;
+
 	struct page_pool *page_pool;
 	struct xdp_rxq_info xdp_rxq;
 };
@@ -172,6 +177,9 @@ struct netfront_info {
 	/* Is device behaving sane? */
 	bool broken;
 
+	/* Should skbs be bounced into a zeroed buffer? */
+	bool bounce;
+
 	atomic_t rx_gso_checksum_fixup;
 };
 
@@ -221,7 +229,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 {
 	int i = xennet_rxidx(ri);
 	grant_ref_t ref = queue->grant_rx_ref[i];
-	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 	return ref;
 }
 
@@ -270,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 	if (unlikely(!skb))
 		return NULL;
 
-	page = page_pool_dev_alloc_pages(queue->page_pool);
+	page = page_pool_alloc_pages(queue->page_pool,
+				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 	if (unlikely(!page)) {
 		kfree_skb(skb);
 		return NULL;
@@ -376,12 +385,13 @@ static int xennet_open(struct net_device *dev)
 	return 0;
 }
 
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 {
 	RING_IDX cons, prod;
 	unsigned short id;
 	struct sk_buff *skb;
 	bool more_to_do;
+	bool work_done = false;
 	const struct device *dev = &queue->info->netdev->dev;
 
 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +408,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 			struct xen_netif_tx_response txrsp;
 
+			work_done = true;
+
 			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
 			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
@@ -418,17 +430,15 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 			queue->tx_link[id] = TX_LINK_NONE;
 			skb = queue->tx_skbs[id];
 			queue->tx_skbs[id] = NULL;
-			if (unlikely(gnttab_query_foreign_access(
-				queue->grant_tx_ref[id]) != 0)) {
+			if (unlikely(!gnttab_end_foreign_access_ref(
+				queue->grant_tx_ref[id]))) {
 				dev_alert(dev,
 					  "Grant still in use by backend domain\n");
 				goto err;
 			}
-			gnttab_end_foreign_access_ref(
-				queue->grant_tx_ref[id], GNTMAP_readonly);
 			gnttab_release_grant_reference(
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
-			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
 			queue->grant_tx_page[id] = NULL;
 			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 			dev_kfree_skb_irq(skb);
@@ -441,11 +451,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
 	xennet_maybe_wake_tx(queue);
 
-	return;
+	return work_done;
 
  err:
 	queue->info->broken = true;
 	dev_alert(dev, "Disabled for further use\n");
+
+	return work_done;
 }
 
 struct xennet_gnttab_make_txreq {
@@ -661,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 	return nxmit;
 }
 
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+	unsigned int headerlen = skb_headroom(skb);
+	/* Align size to allocate full pages and avoid contiguous data leaks */
+	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+				  XEN_PAGE_SIZE);
+	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+	if (!n)
+		return NULL;
+
+	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+		WARN_ONCE(1, "misaligned skb allocated\n");
+		kfree_skb(n);
+		return NULL;
+	}
+
+	/* Set the data pointer */
+	skb_reserve(n, headerlen);
+	/* Set the tail pointer and length */
+	skb_put(n, skb->len);
+
+	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+	skb_copy_header(n, skb);
+	return n;
+}
+
 #define MAX_XEN_SKB_FRAGS	(65536 / XEN_PAGE_SIZE + 1)
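The bounce buffer is deliberately rounded up to whole Xen pages: netback is granted the entire page containing the data, so any slack between the end of the packet and the end of the page must hold nothing but zeroes. A worked example of the sizing (illustrative numbers, not taken from the patch):

    /* Assume a typical linear skb: skb_end_offset() == 1664 bytes of head
     * buffer and skb->data_len == 0 (no paged fragments). XEN_PAGE_SIZE is
     * 4096, and ALIGN() rounds up to the next multiple:
     *
     *     size = ALIGN(1664 + 0, 4096) = 4096
     *
     * alloc_skb(size, GFP_ATOMIC | __GFP_ZERO) then yields a fully zeroed
     * buffer, and the IS_ALIGNED() check guards the assumption that a
     * page-sized allocation starts on a page boundary, so exactly one
     * page is granted and no neighbouring kernel data is exposed with it.
     */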
@@ -714,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* The first req should be at least ETH_HLEN size or the packet will be
 	 * dropped by netback.
+	 *
+	 * If the backend is not trusted bounce all data to zeroed pages to
+	 * avoid exposing contiguous data on the granted page not belonging to
+	 * the skb.
 	 */
-	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
-		nskb = skb_copy(skb, GFP_ATOMIC);
+	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+		nskb = bounce_skb(skb);
 		if (!nskb)
 			goto drop;
 		dev_consume_skb_any(skb);
@@ -834,6 +877,38 @@ static int xennet_close(struct net_device *dev)
 	return 0;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+	unsigned int i;
+
+	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+		struct netfront_queue *queue = &info->queues[i];
+
+		if (netif_running(info->netdev))
+			napi_disable(&queue->napi);
+		netif_napi_del(&queue->napi);
+	}
+
+	kfree(info->queues);
+	info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+
+	xennet_destroy_queues(np);
+}
+
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->rx_cons_lock, flags);
+	queue->rx.rsp_cons = val;
+	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
+	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 				grant_ref_t ref)
 {
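From here on, every update of the RX consumer index goes through xennet_set_rx_rsp_cons(), which refreshes the rx_rsp_unconsumed shadow under rx_cons_lock. The ring macro it relies on boils down to the distance between the backend's response producer and the frontend's consumer; a sketch of its definition (paraphrased, see include/xen/interface/io/ring.h for the authoritative version):

    /* Sketch of the helper this patch relies on, paraphrased from
     * include/xen/interface/io/ring.h (added by the XSA-391 series):
     */
    #define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
            ((_r)->sring->rsp_prod - (_r)->rsp_cons)

Keeping rsp_cons and the shadow coherent in one critical section is what later lets the RX interrupt handler tell a genuine notification (the unconsumed count grew) from a spurious or malicious one (it did not grow, or the producer even moved backwards).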
@@ -885,7 +960,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 		xennet_move_rx_slot(queue, skb, ref);
 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-	queue->rx.rsp_cons = cons;
+	xennet_set_rx_rsp_cons(queue, cons);
 	return err;
 }
 
@@ -930,7 +1005,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 		break;
 
 	default:
-		bpf_warn_invalid_xdp_action(act);
+		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
 	}
 
 	return act;
@@ -950,7 +1025,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	struct device *dev = &queue->info->netdev->dev;
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
-	unsigned long ret;
 	int slots = 1;
 	int err = 0;
 	u32 verdict;
@@ -969,22 +1043,12 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	}
 
 	for (;;) {
-		if (unlikely(rx->status < 0 ||
-			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
-			if (net_ratelimit())
-				dev_warn(dev, "rx->offset: %u, size: %d\n",
-					 rx->offset, rx->status);
-			xennet_move_rx_slot(queue, skb, ref);
-			err = -EINVAL;
-			goto next;
-		}
-
 		/*
 		 * This definitely indicates a bug, either in this driver or in
 		 * the backend driver. In future this should flag the bad
 		 * situation to the system controller to reboot the backend.
 		 */
-		if (ref == GRANT_INVALID_REF) {
+		if (ref == INVALID_GRANT_REF) {
 			if (net_ratelimit())
 				dev_warn(dev, "Bad rx response id %d.\n",
 					 rx->id);
@@ -992,8 +1056,23 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			goto next;
 		}
 
-		ret = gnttab_end_foreign_access_ref(ref, 0);
-		BUG_ON(!ret);
+		if (unlikely(rx->status < 0 ||
+			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
+			if (net_ratelimit())
+				dev_warn(dev, "rx->offset: %u, size: %d\n",
+					 rx->offset, rx->status);
+			xennet_move_rx_slot(queue, skb, ref);
+			err = -EINVAL;
+			goto next;
+		}
+
+		if (!gnttab_end_foreign_access_ref(ref)) {
+			dev_alert(dev,
+				  "Grant still in use by backend domain\n");
+			queue->info->broken = true;
+			dev_alert(dev, "Disabled for further use\n");
+			return -EINVAL;
+		}
 
 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
@@ -1013,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			}
 		}
 		rcu_read_unlock();
-next:
+
 		__skb_queue_tail(list, skb);
+
+next:
 		if (!(rx->flags & XEN_NETRXF_more_data))
 			break;
 
@@ -1039,7 +1120,7 @@ next:
 	}
 
 	if (unlikely(err))
-		queue->rx.rsp_cons = cons + slots;
+		xennet_set_rx_rsp_cons(queue, cons + slots);
 
 	return err;
 }
@@ -1093,7 +1174,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+			xennet_set_rx_rsp_cons(queue,
+					       ++cons + skb_queue_len(list));
 			kfree_skb(nskb);
 			return -ENOENT;
 		}
@@ -1106,7 +1188,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 		kfree_skb(nskb);
 	}
 
-	queue->rx.rsp_cons = cons;
+	xennet_set_rx_rsp_cons(queue, cons);
 
 	return 0;
 }
@@ -1213,6 +1295,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 					   &need_xdp_flush);
 
 		if (unlikely(err)) {
+			if (queue->info->broken) {
+				spin_unlock(&queue->rx_lock);
+				return 0;
+			}
 err:
 			while ((skb = __skb_dequeue(&tmpq)))
 				__skb_queue_tail(&errq, skb);
@@ -1229,7 +1315,9 @@ err:
 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 				__skb_queue_head(&tmpq, skb);
-				queue->rx.rsp_cons += skb_queue_len(&tmpq);
+				xennet_set_rx_rsp_cons(queue,
+						       queue->rx.rsp_cons +
+						       skb_queue_len(&tmpq));
 				goto err;
 			}
 		}
@@ -1253,7 +1341,8 @@ err:
 
 		__skb_queue_tail(&rxq, skb);
 
-		i = ++queue->rx.rsp_cons;
+		i = queue->rx.rsp_cons + 1;
+		xennet_set_rx_rsp_cons(queue, i);
 		work_done++;
 	}
 	if (need_xdp_flush)
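Note the shape of the change in xennet_poll(): a bare increment of the consumer becomes a read, an add, and a call into the locked helper. A sketch of why the direct increment is no longer safe (hypothetical illustration, not driver code):

    /* Unsafe once rx_rsp_unconsumed exists: the increment is not
     * coordinated with the shadow counter ...
     */
    i = ++queue->rx.rsp_cons;

    /* ... so the patched code advances the consumer only through the
     * helper, which takes rx_cons_lock and refreshes rx_rsp_unconsumed
     * in the same critical section:
     */
    i = queue->rx.rsp_cons + 1;
    xennet_set_rx_rsp_cons(queue, i);

Without this, an RX interrupt racing with NAPI could compare a fresh producer index against a stale shadow and misjudge whether the backend's event carried real work.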
@@ -1338,10 +1427,9 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
 		queue->tx_skbs[i] = NULL;
 		get_page(queue->grant_tx_page[i]);
 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
-					  GNTMAP_readonly,
-					  (unsigned long)page_address(queue->grant_tx_page[i]));
+					  queue->grant_tx_page[i]);
 		queue->grant_tx_page[i] = NULL;
-		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
 		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
 		dev_kfree_skb_irq(skb);
 	}
@@ -1362,7 +1450,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 			continue;
 
 		ref = queue->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF)
+		if (ref == INVALID_GRANT_REF)
 			continue;
 
 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -1371,9 +1459,8 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 		 * foreign access is ended (which may be deferred).
 		 */
 		get_page(page);
-		gnttab_end_foreign_access(ref, 0,
-					  (unsigned long)page_address(page));
-		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
+		gnttab_end_foreign_access(ref, page);
+		queue->grant_rx_ref[id] = INVALID_GRANT_REF;
 
 		kfree_skb(skb);
 	}
@@ -1417,40 +1504,79 @@ static int xennet_set_features(struct net_device *dev,
 	return 0;
 }
 
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
 {
-	struct netfront_queue *queue = dev_id;
 	unsigned long flags;
 
-	if (queue->info->broken)
-		return IRQ_HANDLED;
+	if (unlikely(queue->info->broken))
		return false;
 
 	spin_lock_irqsave(&queue->tx_lock, flags);
-	xennet_tx_buf_gc(queue);
+	if (xennet_tx_buf_gc(queue))
+		*eoi = 0;
 	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
+	return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+		xen_irq_lateeoi(irq, eoiflag);
+
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
 {
-	struct netfront_queue *queue = dev_id;
-	struct net_device *dev = queue->info->netdev;
+	unsigned int work_queued;
+	unsigned long flags;
+
+	if (unlikely(queue->info->broken))
+		return false;
+
+	spin_lock_irqsave(&queue->rx_cons_lock, flags);
+	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
+	if (work_queued > queue->rx_rsp_unconsumed) {
+		queue->rx_rsp_unconsumed = work_queued;
+		*eoi = 0;
+	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+		const struct device *dev = &queue->info->netdev->dev;
 
-	if (queue->info->broken)
-		return IRQ_HANDLED;
+		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+		dev_alert(dev, "RX producer index going backwards\n");
+		dev_alert(dev, "Disabled for further use\n");
+		queue->info->broken = true;
+		return false;
+	}
+	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 
-	if (likely(netif_carrier_ok(dev) &&
-		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
 		napi_schedule(&queue->napi);
 
+	return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+		xen_irq_lateeoi(irq, eoiflag);
+
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 {
-	xennet_tx_interrupt(irq, dev_id);
-	xennet_rx_interrupt(irq, dev_id);
+	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+	if (xennet_handle_tx(dev_id, &eoiflag) &&
+	    xennet_handle_rx(dev_id, &eoiflag))
+		xen_irq_lateeoi(irq, eoiflag);
+
 	return IRQ_HANDLED;
 }
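The interrupt rework above follows the late-EOI pattern the XSA-391 hardening introduced across Xen frontends: the handler only decides whether the event carried real work, and the event channel core is told afterwards. A distilled sketch of the contract (not the driver's literal code; do_work() is a placeholder):

    static irqreturn_t frontend_irq(int irq, void *dev_id)
    {
            /* Assume the event is spurious until proven otherwise. */
            unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

            if (likely(do_work(dev_id, &eoiflag))) /* clears eoiflag on real work */
                    xen_irq_lateeoi(irq, eoiflag);
            /* If do_work() found the device broken, no EOI is issued at
             * all: the event channel stays masked, so a backend flooding
             * it can no longer monopolize a CPU.
             */
            return IRQ_HANDLED;
    }

Binding with bind_evtchn_to_irqhandler_lateeoi() is what defers the EOI; passing XEN_EOI_FLAG_SPURIOUS lets the event channel core throttle sources that keep signalling without producing work.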
@@ -1550,6 +1676,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 }
 
 static const struct net_device_ops xennet_netdev_ops = {
+	.ndo_uninit          = xennet_uninit,
 	.ndo_open            = xennet_open,
 	.ndo_stop            = xennet_close,
 	.ndo_start_xmit      = xennet_start_xmit,
@@ -1672,8 +1799,8 @@ static int netfront_probe(struct xenbus_device *dev,
 static void xennet_end_access(int ref, void *page)
 {
 	/* This frees the page as a side-effect */
-	if (ref != GRANT_INVALID_REF)
-		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+	if (ref != INVALID_GRANT_REF)
+		gnttab_end_foreign_access(ref, virt_to_page(page));
 }
 
@@ -1709,8 +1836,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
-		queue->tx_ring_ref = GRANT_INVALID_REF;
-		queue->rx_ring_ref = GRANT_INVALID_REF;
+		queue->tx_ring_ref = INVALID_GRANT_REF;
+		queue->rx_ring_ref = INVALID_GRANT_REF;
 		queue->tx.sring = NULL;
 		queue->rx.sring = NULL;
 
@@ -1768,9 +1895,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
 	if (err < 0)
 		goto fail;
 
-	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-					xennet_interrupt,
-					0, queue->info->netdev->name, queue);
+	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+						xennet_interrupt, 0,
+						queue->info->netdev->name,
+						queue);
 	if (err < 0)
 		goto bind_fail;
 	queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1926,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
 		 "%s-tx", queue->name);
-	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-					xennet_tx_interrupt,
-					0, queue->tx_irq_name, queue);
+	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+						xennet_tx_interrupt, 0,
+						queue->tx_irq_name, queue);
 	if (err < 0)
 		goto bind_tx_fail;
 	queue->tx_irq = err;
 
 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
 		 "%s-rx", queue->name);
-	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
-					xennet_rx_interrupt,
-					0, queue->rx_irq_name, queue);
+	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+						xennet_rx_interrupt, 0,
+						queue->rx_irq_name, queue);
 	if (err < 0)
 		goto bind_rx_fail;
 	queue->rx_irq = err;
@@ -1834,41 +1962,26 @@ static int setup_netfront(struct xenbus_device *dev,
 {
 	struct xen_netif_tx_sring *txs;
 	struct xen_netif_rx_sring *rxs;
-	grant_ref_t gref;
 	int err;
 
-	queue->tx_ring_ref = GRANT_INVALID_REF;
-	queue->rx_ring_ref = GRANT_INVALID_REF;
+	queue->tx_ring_ref = INVALID_GRANT_REF;
+	queue->rx_ring_ref = INVALID_GRANT_REF;
 	queue->rx.sring = NULL;
 	queue->tx.sring = NULL;
 
-	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
-	if (!txs) {
-		err = -ENOMEM;
-		xenbus_dev_fatal(dev, err, "allocating tx ring page");
+	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
+				1, &queue->tx_ring_ref);
+	if (err)
 		goto fail;
-	}
-	SHARED_RING_INIT(txs);
-	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
-	err = xenbus_grant_ring(dev, txs, 1, &gref);
-	if (err < 0)
-		goto grant_tx_ring_fail;
-	queue->tx_ring_ref = gref;
+	XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
-	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
-	if (!rxs) {
-		err = -ENOMEM;
-		xenbus_dev_fatal(dev, err, "allocating rx ring page");
-		goto alloc_rx_ring_fail;
-	}
-	SHARED_RING_INIT(rxs);
-	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
+				1, &queue->rx_ring_ref);
+	if (err)
+		goto fail;
 
-	err = xenbus_grant_ring(dev, rxs, 1, &gref);
-	if (err < 0)
-		goto grant_rx_ring_fail;
-	queue->rx_ring_ref = gref;
+	XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
 	if (feature_split_evtchn)
 		err = setup_netfront_split(queue);
@@ -1880,22 +1993,14 @@ static int setup_netfront(struct xenbus_device *dev,
 		err = setup_netfront_single(queue);
 
 	if (err)
-		goto alloc_evtchn_fail;
+		goto fail;
 
 	return 0;
 
-	/* If we fail to setup netfront, it is safe to just revoke access to
-	 * granted pages because backend is not accessing it at this point.
-	 */
-alloc_evtchn_fail:
-	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
-grant_rx_ring_fail:
-	free_page((unsigned long)rxs);
-alloc_rx_ring_fail:
-	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
-grant_tx_ring_fail:
-	free_page((unsigned long)txs);
-fail:
+ fail:
+	xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
+	xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
+
 	return err;
 }
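xenbus_setup_ring() and xenbus_teardown_ring() (new in 5.18) absorb the boilerplate the removed error ladder had to unwind by hand. Roughly, per ring (a simplification of the helper, not its full implementation):

    /* What the removed open-coded path did for each ring, approximately:
     * xenbus_setup_ring() allocates zeroed, grantable ring pages and
     * grants them to the backend, reporting failures via
     * xenbus_dev_fatal(); xenbus_teardown_ring() revokes the grants and
     * frees the pages again.
     */
    txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
    err = xenbus_grant_ring(dev, txs, 1, &queue->tx_ring_ref);

Because the shared page comes back already zeroed, XEN_FRONT_RING_INIT() appears to only initialise the frontend's private view of the ring, attaching at index 0 instead of rewriting the shared producer/consumer fields the way SHARED_RING_INIT() did.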
@@ -1911,6 +2016,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 
 	spin_lock_init(&queue->tx_lock);
 	spin_lock_init(&queue->rx_lock);
+	spin_lock_init(&queue->rx_cons_lock);
 
 	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
@@ -1923,7 +2029,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		queue->tx_link[i] = i + 1;
-		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
 		queue->grant_tx_page[i] = NULL;
 	}
 	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
@@ -1931,7 +2037,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		queue->rx_skbs[i] = NULL;
-		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+		queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 	}
 
 	/* A grant for every tx ring slot */
@@ -2040,22 +2146,6 @@ error:
 	return err;
 }
 
-static void xennet_destroy_queues(struct netfront_info *info)
-{
-	unsigned int i;
-
-	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
-		struct netfront_queue *queue = &info->queues[i];
-
-		if (netif_running(info->netdev))
-			napi_disable(&queue->napi);
-		netif_napi_del(&queue->napi);
-	}
-
-	kfree(info->queues);
-	info->queues = NULL;
-}
-
 static int xennet_create_page_pool(struct netfront_queue *queue)
@@ -2134,8 +2224,7 @@ static int xennet_create_queues(struct netfront_info *info,
 			return ret;
 		}
 
-		netif_napi_add(queue->info->netdev, &queue->napi,
-			       xennet_poll, 64);
+		netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
 		if (netif_running(info->netdev))
 			napi_enable(&queue->napi);
 	}
@@ -2165,6 +2254,10 @@ static int talk_to_netback(struct xenbus_device *dev,
 	info->netdev->irq = 0;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xennet_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	/* Check if backend supports multiple queues */
 	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
 					  "multi-queue-max-queues", 1);
@@ -2332,6 +2425,9 @@ static int xennet_connect(struct net_device *dev)
 		return err;
 	if (np->netback_has_xdp_headroom)
 		pr_info("backend supports XDP headroom\n");
+	if (np->bounce)
+		dev_info(&np->xbdev->dev,
+			 "bouncing transmitted data to zeroed pages\n");
 
 	/* talk_to_netback() sets the correct number of queues */
 	num_queues = dev->real_num_tx_queues;
@@ -2367,10 +2463,6 @@ static int xennet_connect(struct net_device *dev)
 		if (queue->tx_irq != queue->rx_irq)
 			notify_remote_via_irq(queue->rx_irq);
 
-		spin_lock_irq(&queue->tx_lock);
-		xennet_tx_buf_gc(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
 		spin_lock_bh(&queue->rx_lock);
 		xennet_alloc_rx_buffers(queue);
 		spin_unlock_bh(&queue->rx_lock);
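Taken together, two knobs control bouncing: the global module parameter added at the top of this patch, and a per-device "trusted" node in the frontend's xenstore directory. The decision in talk_to_netback() distills to (same logic as the hunk above, with the default semantics spelled out):

    /* Bounce unless BOTH knobs say the backend is trusted.
     * xenbus_read_unsigned(dir, node, 1) returns the default (1) when the
     * node is absent or unreadable, so installations whose toolstack never
     * writes "trusted" keep the existing zero-copy behaviour.
     */
    info->bounce = !xennet_trusted ||
                   !xenbus_read_unsigned(dev->nodename, "trusted", 1);

In practice, booting with xen_netfront.trusted=0 (or writing 0 to /sys/module/xen_netfront/parameters/trusted, which takes effect for devices connected afterwards, since bounce is evaluated in talk_to_netback()) forces every transmitted packet through bounce_skb(). Marking a single vif as untrusted is left to the toolstack, which would write 0 to that device's "trusted" node.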