author     Paul Durrant <Paul.Durrant@citrix.com>  2016-10-04 10:29:13 +0100
committer  David S. Miller <davem@davemloft.net>   2016-10-06 20:37:35 -0400
commit     fedbc8c132bcf836358103195d8b6df6c03d9daf (patch)
tree       c1ec65b5672d0dda794abd74c2f0ac5ded628ad4 /drivers/net/xen-netback/rx.c
parent     xen-netback: separate guest side rx code into separate module (diff)
xen-netback: retire guest rx side prefix GSO feature
As far as I am aware only very old Windows network frontends make use of this style of passing GSO packets from backend to frontend. These frontends can easily be replaced by the freely available Xen Project Windows PV network frontend, which uses the 'default' mechanism for passing GSO packets, which is also used by all Linux frontends.

NOTE: Removal of this feature will not cause breakage in old Windows frontends. They simply will no longer receive GSO packets - the packets will instead be fragmented in the backend.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
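For illustration, the fallback the message alludes to is ordinary software GSO segmentation in the host: when a frontend has not negotiated a GSO mechanism, large skbs are split into wire-sized packets before they are handed to the guest. The sketch below shows the generic kernel pattern using skb_gso_segment(); the helper name xenvif_sw_segment() and its placement are hypothetical and not part of this commit.

/*
 * Illustrative only: a hypothetical helper showing the generic
 * software-segmentation pattern (skb_gso_segment()) that stands in
 * for hardware/guest GSO when a frontend cannot accept large
 * packets.  Not code from this commit.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int xenvif_sw_segment(struct sk_buff *skb, struct sk_buff_head *out)
{
        struct sk_buff *segs, *seg;

        /* Ask the core stack to split the GSO skb; no offload features. */
        segs = skb_gso_segment(skb, 0);
        if (IS_ERR_OR_NULL(segs))
                return segs ? PTR_ERR(segs) : -EINVAL;

        /* Queue each resulting wire-sized segment for normal delivery. */
        while ((seg = segs) != NULL) {
                segs = seg->next;
                seg->next = NULL;
                __skb_queue_tail(out, seg);
        }

        consume_skb(skb);       /* the original large skb is no longer needed */
        return 0;
}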
Diffstat (limited to 'drivers/net/xen-netback/rx.c')
-rw-r--r--  drivers/net/xen-netback/rx.c | 26
1 file changed, 0 insertions(+), 26 deletions(-)
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index 03836aaac1c2..6bd7d6e84b8e 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -347,16 +347,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                         gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
         }
 
-        /* Set up a GSO prefix descriptor, if necessary */
-        if ((1 << gso_type) & vif->gso_prefix_mask) {
-                RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-                meta = npo->meta + npo->meta_prod++;
-                meta->gso_type = gso_type;
-                meta->gso_size = skb_shinfo(skb)->gso_size;
-                meta->size = 0;
-                meta->id = req.id;
-        }
-
         RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
         meta = npo->meta + npo->meta_prod++;
@@ -511,22 +501,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
         while ((skb = __skb_dequeue(&rxq)) != NULL) {
                 struct xen_netif_extra_info *extra = NULL;
 
-                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                    vif->gso_prefix_mask) {
-                        resp = RING_GET_RESPONSE(&queue->rx,
-                                                 queue->rx.rsp_prod_pvt++);
-
-                        resp->flags = XEN_NETRXF_gso_prefix |
-                                      XEN_NETRXF_more_data;
-
-                        resp->offset = queue->meta[npo.meta_cons].gso_size;
-                        resp->id = queue->meta[npo.meta_cons].id;
-                        resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
-
-                        npo.meta_cons++;
-                        XENVIF_RX_CB(skb)->meta_slots_used--;
-                }
-
                 queue->stats.tx_bytes += skb->len;
                 queue->stats.tx_packets++;
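For context (not part of the diff above), the 'default' GSO mechanism that the commit message says all Linux frontends use remains in place: instead of a prefix descriptor, the GSO metadata travels in a XEN_NETIF_EXTRA_TYPE_GSO extra-info slot attached to the rx response. The following is a rough sketch of that retained path in xenvif_rx_action(), paraphrased from the surrounding rx code rather than quoted from this commit, so field-for-field accuracy is not guaranteed.

/*
 * Sketch of the retained "default" GSO signalling path: the GSO type
 * and size are carried in an extra-info slot rather than a prefix
 * descriptor.  Paraphrased for illustration, not copied from this diff.
 */
if ((1 << queue->meta[npo.meta_cons].gso_type) & vif->gso_mask) {
        struct xen_netif_extra_info *gso =
                (struct xen_netif_extra_info *)
                RING_GET_RESPONSE(&queue->rx, queue->rx.rsp_prod_pvt++);

        /* Tell the frontend an extra-info slot follows this response. */
        resp->flags |= XEN_NETRXF_extra_info;

        gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
        gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
        gso->u.gso.pad = 0;
        gso->u.gso.features = 0;

        gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
        gso->flags = 0;
}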