Diffstat (limited to '')
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c (renamed from drivers/staging/hv/netvsc_drv.c)  175
 1 file changed, 114 insertions(+), 61 deletions(-)
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 93b0e91cbf98..466c58a7353d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,24 +43,60 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
- atomic_t avail;
struct delayed_work dwork;
};
-#define PACKET_PAGES_LOWATER 8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
-
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-/* no-op so the netdev core doesn't return -EINVAL when modifying the the
- * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
- * when it calls RndisFilterOnOpen() */
+struct set_multicast_work {
+ struct work_struct work;
+ struct net_device *net;
+};
+
+static void do_set_multicast(struct work_struct *w)
+{
+ struct set_multicast_work *swk =
+ container_of(w, struct set_multicast_work, work);
+ struct net_device *net = swk->net;
+
+ struct net_device_context *ndevctx = netdev_priv(net);
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+
+ nvdev = hv_get_drvdata(ndevctx->device_ctx);
+ if (nvdev == NULL)
+ goto out;
+
+ rdev = nvdev->extension;
+ if (rdev == NULL)
+ goto out;
+
+ if (net->flags & IFF_PROMISC)
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_PROMISCUOUS);
+ else
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+
+out:
+ kfree(w);
+}
+
static void netvsc_set_multicast_list(struct net_device *net)
{
+ struct set_multicast_work *swk =
+ kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
+ if (swk == NULL)
+ return;
+
+ swk->net = net;
+ INIT_WORK(&swk->work, do_set_multicast);
+ schedule_work(&swk->work);
}
static int netvsc_open(struct net_device *net)
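
The no-op rx-mode handler is replaced above by one that defers the filter change to a workqueue: .ndo_set_rx_mode is called in atomic context (under the address-list lock), while the RNDIS filter call can sleep, so the work item carries the request into process context and frees itself when done. Below is a minimal, generic sketch of that defer-and-free pattern, not the driver code itself; do_sleeping_config() is a placeholder for a sleeping call such as rndis_filter_set_packet_filter().

/* Sketch only: defer a sleeping operation out of atomic .ndo_set_rx_mode. */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

struct deferred_cfg {
        struct work_struct work;
        struct net_device *net;
};

static void deferred_cfg_fn(struct work_struct *w)
{
        struct deferred_cfg *cfg = container_of(w, struct deferred_cfg, work);

        do_sleeping_config(cfg->net);   /* placeholder: the call that may sleep */
        kfree(cfg);                     /* the work item frees itself when done */
}

static void example_set_rx_mode(struct net_device *net)
{
        struct deferred_cfg *cfg = kmalloc(sizeof(*cfg), GFP_ATOMIC);

        if (!cfg)
                return;                 /* best effort: drop this update */

        cfg->net = net;
        INIT_WORK(&cfg->work, deferred_cfg_fn);
        schedule_work(&cfg->work);
}
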
@@ -87,7 +123,7 @@ static int netvsc_close(struct net_device *net)
struct hv_device *device_obj = net_device_ctx->device_ctx;
int ret;
- netif_stop_queue(net);
+ netif_tx_disable(net);
ret = rndis_filter_close(device_obj);
if (ret != 0)
@@ -104,18 +140,8 @@ static void netvsc_xmit_completion(void *context)
kfree(packet);
- if (skb) {
- struct net_device *net = skb->dev;
- struct net_device_context *net_device_ctx = netdev_priv(net);
- unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+ if (skb)
dev_kfree_skb_any(skb);
-
- atomic_add(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) >=
- PACKET_PAGES_HIWATER)
- netif_wake_queue(net);
- }
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -123,12 +149,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages;
+ unsigned int i, num_pages, npg_data;
- /* Add 1 for skb->data and additional one for RNDIS */
- num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
- if (num_pages > atomic_read(&net_device_ctx->avail))
- return NETDEV_TX_BUSY;
+ /* Add multipages for skb->data and additional 2 for RNDIS */
+ npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
+ >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
+ num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
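
The arithmetic above counts how many pages the linear part of the skb (skb->data, length skb_headlen()) actually spans, since it may start near the end of one page and run into the next; two more slots are then reserved for the RNDIS message. A small self-contained sketch with made-up addresses (4 KiB pages assumed) shows the formula at work:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages for the example */

static unsigned int pages_spanned(unsigned long data, unsigned long headlen)
{
        /* index of the last page minus index of the first page, plus one */
        return ((data + headlen - 1) >> PAGE_SHIFT) -
               (data >> PAGE_SHIFT) + 1;
}

int main(void)
{
        /* 64-byte head starting 16 bytes before a page boundary: 2 pages */
        printf("%u\n", pages_spanned(0x1000ff0UL, 64));
        /* 1500-byte head starting exactly at a page boundary: 1 page */
        printf("%u\n", pages_spanned(0x1001000UL, 1500));
        return 0;
}
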
@@ -147,25 +173,40 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
- /* Setup the rndis header */
- packet->page_buf_cnt = num_pages;
+ /* If the rndis msg goes beyond 1 page, we will add 1 later */
+ packet->page_buf_cnt = num_pages - 1;
/* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
packet->page_buf[1].offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->page_buf[1].len = skb_headlen(skb);
+ if (npg_data == 1)
+ packet->page_buf[1].len = skb_headlen(skb);
+ else
+ packet->page_buf[1].len = PAGE_SIZE
+ - packet->page_buf[1].offset;
+
+ for (i = 2; i <= npg_data; i++) {
+ packet->page_buf[i].pfn = virt_to_phys(skb->data
+ + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
+ packet->page_buf[i].offset = 0;
+ packet->page_buf[i].len = PAGE_SIZE;
+ }
+ if (npg_data > 1)
+ packet->page_buf[npg_data].len = (((unsigned long)skb->data
+ + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+2].offset = f->page_offset;
- packet->page_buf[i+2].len = skb_frag_size(f);
+ packet->page_buf[i+npg_data+1].pfn =
+ page_to_pfn(skb_frag_page(f));
+ packet->page_buf[i+npg_data+1].offset = f->page_offset;
+ packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
}
/* Set the completion routine */
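
With the variable-sized head in place, the page buffer list is laid out as: slot 0 reserved for the RNDIS message, slots 1..npg_data describing the linear data (the first slot may start mid-page, middle slots cover full pages, the last slot ends mid-page), and the skb fragments following from slot npg_data+1 onward. The sketch below mirrors only the linear-data fill on plain addresses so it can be checked in userspace; struct and function names are illustrative, not the driver's.

#include <stdio.h>

#define PAGE_SHIFT      12              /* assume 4 KiB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

struct pb_entry {                       /* illustrative stand-in for hv_page_buffer */
        unsigned long pfn;
        unsigned int offset;
        unsigned int len;
};

/* Fill slots 1..npg for the linear data area; slot 0 stays free for RNDIS. */
static unsigned int fill_linear(struct pb_entry *pb, unsigned long data,
                                unsigned int headlen)
{
        unsigned int i;
        unsigned int npg = ((data + headlen - 1) >> PAGE_SHIFT) -
                           (data >> PAGE_SHIFT) + 1;

        pb[1].pfn = data >> PAGE_SHIFT;
        pb[1].offset = data & (PAGE_SIZE - 1);
        pb[1].len = (npg == 1) ? headlen : PAGE_SIZE - pb[1].offset;

        for (i = 2; i <= npg; i++) {    /* middle pages are used in full */
                pb[i].pfn = (data + PAGE_SIZE * (i - 1)) >> PAGE_SHIFT;
                pb[i].offset = 0;
                pb[i].len = PAGE_SIZE;
        }
        if (npg > 1)                    /* trim the final page to what remains */
                pb[npg].len = ((data + headlen - 1) & (PAGE_SIZE - 1)) + 1;

        return npg;
}

int main(void)
{
        struct pb_entry pb[8] = { { 0 } };
        unsigned int i, npg = fill_linear(pb, 0x1000f00UL, 0x300);

        for (i = 1; i <= npg; i++)
                printf("slot %u: pfn 0x%lx offset 0x%x len 0x%x\n",
                       i, pb[i].pfn, pb[i].offset, pb[i].len);
        return 0;
}
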
@@ -178,10 +219,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
-
- atomic_sub(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
- netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
@@ -219,7 +256,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
- netif_stop_queue(net);
+ netif_tx_disable(net);
}
}
@@ -232,9 +269,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct sk_buff *skb;
- void *data;
- int i;
- unsigned long flags;
struct netvsc_device *net_device;
net_device = hv_get_drvdata(device_obj);
@@ -253,33 +287,18 @@ int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
- /* for kmap_atomic */
- local_irq_save(flags);
-
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- for (i = 0; i < packet->page_buf_cnt; i++) {
- data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
- KM_IRQ1);
- data = (void *)(unsigned long)data +
- packet->page_buf[i].offset;
-
- memcpy(skb_put(skb, packet->page_buf[i].len), data,
- packet->page_buf[i].len);
-
- kunmap_atomic((void *)((unsigned long)data -
- packet->page_buf[i].offset), KM_IRQ1);
- }
-
- local_irq_restore(flags);
+ memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
net->stats.rx_packets++;
- net->stats.rx_bytes += skb->len;
+ net->stats.rx_bytes += packet->total_data_buflen;
/*
* Pass the skb back up. Network stack will deallocate the skb when it
@@ -299,6 +318,39 @@ static void netvsc_get_drvinfo(struct net_device *net,
strcpy(info->fw_version, "N/A");
}
+static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device_info device_info;
+ int limit = ETH_DATA_LEN;
+
+ if (nvdev == NULL || nvdev->destroy)
+ return -ENODEV;
+
+ if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ limit = NETVSC_MTU;
+
+ if (mtu < 68 || mtu > limit)
+ return -EINVAL;
+
+ nvdev->start_remove = true;
+ cancel_delayed_work_sync(&ndevctx->dwork);
+ netif_tx_disable(ndev);
+ rndis_filter_device_remove(hdev);
+
+ ndev->mtu = mtu;
+
+ ndevctx->device_ctx = hdev;
+ hv_set_drvdata(hdev, ndev);
+ device_info.ring_size = ring_size;
+ rndis_filter_device_add(hdev, &device_info);
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
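
The new netvsc_change_mtu() takes the heavyweight route: instead of simply storing the new value, it quiesces transmission, tears the RNDIS device down, applies the MTU, and rebuilds the device before waking the queue. Stripped of the Hyper-V specifics, the ordering is the part worth noting; in this hedged sketch my_hw_teardown() and my_hw_setup() are placeholders (standing in for rndis_filter_device_remove()/_add()), while the netif_* helpers are the same core APIs the patch uses.

/* Sketch of the quiesce / teardown / apply / rebuild ordering used above. */
static int example_change_mtu(struct net_device *ndev, int mtu)
{
        if (mtu < 68 || mtu > ETH_DATA_LEN)     /* validate the requested MTU */
                return -EINVAL;

        netif_tx_disable(ndev);                 /* stop outbound traffic first */
        my_hw_teardown(ndev);                   /* placeholder: release device state */

        ndev->mtu = mtu;                        /* apply while the device is quiet */

        my_hw_setup(ndev);                      /* placeholder: re-create device state */
        netif_wake_queue(ndev);                 /* resume transmission */
        return 0;
}
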
@@ -309,7 +361,7 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -351,7 +403,6 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
- atomic_set(&net_device_ctx->avail, ring_size);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
@@ -403,11 +454,13 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+ net_device->start_remove = true;
+
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
/* Stop outbound asap */
- netif_stop_queue(net);
+ netif_tx_disable(net);
unregister_netdev(net);