author     K. Y. Srinivasan <kys@microsoft.com>  2015-05-11 15:39:46 -0700
committer  David S. Miller <davem@davemloft.net>  2015-05-12 23:10:43 -0400
commit     82fa3c776e5abba7ed6e4b4f4983d14731c37d6a (patch)
tree       cb52ac0c3e8ccd801c42d0ad6703dd7ffe4adf76 /drivers/net/hyperv/netvsc.c
parent     pktgen: fix packet generation (diff)
hv_netvsc: Use the xmit_more skb flag to optimize signaling the host
Based on the information given to this driver (via the xmit_more skb flag), we can defer signaling the host if more packets are on the way. This will help make the host more efficient, since it can potentially process a larger batch of packets. Implement this optimization.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
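The change below follows a common driver pattern: batch writes onto the ring buffer and signal the peer only once, on the last packet of a batch. As a rough sketch of that pattern (not the netvsc code itself), the following uses hypothetical types and helpers (fake_ring, ring_write, ring_signal, ring_avail_percent) invented for illustration; only the decision logic mirrors the diff:

/*
 * Minimal sketch of the xmit_more batching pattern. All types and
 * helpers here are stand-ins invented for illustration, not kernel
 * or netvsc APIs.
 */
#include <stdbool.h>

#define RING_LOWATER_PCT 10 /* stop the queue below this fill level */

struct fake_ring {
	unsigned int used, size;
};

static unsigned int ring_avail_percent(const struct fake_ring *r)
{
	return 100u * (r->size - r->used) / r->size;
}

static void ring_write(struct fake_ring *r, unsigned int len)
{
	r->used += len; /* place the packet on the ring */
}

static void ring_signal(struct fake_ring *r)
{
	(void)r; /* in a real driver: hypercall/MMIO write to notify the peer */
}

static int fake_xmit(struct fake_ring *ring, unsigned int len, bool xmit_more)
{
	unsigned int avail = ring_avail_percent(ring);

	/*
	 * If this send may leave the ring below the low watermark, the
	 * queue will be stopped afterwards, so signal unconditionally:
	 * a spurious signal is harmless, a missed one could stall TX.
	 */
	if (avail < RING_LOWATER_PCT + 1)
		xmit_more = false;

	ring_write(ring, len);

	if (!xmit_more)
		ring_signal(ring); /* kick the peer only at the end of a batch */

	return 0;
}

Note the low-watermark special case: when the queue may be stopped after this send, the signal happens regardless of xmit_more, which is exactly the precaution the comment in the hunk below spells out.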
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r--  drivers/net/hyperv/netvsc.c | 43
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2d9ef533cc48..1c4f265f4e7c 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -743,6 +743,7 @@ static inline int netvsc_send_pkt(
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
+ u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (packet->is_data_pkt) {
@@ -769,32 +770,42 @@ static inline int netvsc_send_pkt(
if (out_channel->rescind)
return -ENODEV;
+ /*
+ * It is possible that once we successfully place this packet
+ * on the ringbuffer, we may stop the queue. In that case, we want
+ * to notify the host independent of the xmit_more flag. We don't
+ * need to be precise here; in the worst case we may signal the host
+ * unnecessarily.
+ */
+ if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+ packet->xmit_more = false;
+
if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? packet->page_buf +
packet->rmsg_pgcnt : packet->page_buf;
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pgbuf,
- packet->page_buf_cnt,
- &nvmsg,
- sizeof(struct nvsp_message),
- req_id);
+ ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+ pgbuf,
+ packet->page_buf_cnt,
+ &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
} else {
- ret = vmbus_sendpacket(
- out_channel, &nvmsg,
- sizeof(struct nvsp_message),
- req_id,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
}
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
atomic_inc(&net_device->queue_sends[q_idx]);
- if (hv_ringbuf_avail_percent(&out_channel->outbound) <
- RING_AVAIL_PERCENT_LOWATER) {
- netif_tx_stop_queue(netdev_get_tx_queue(
- ndev, q_idx));
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+ netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
if (atomic_read(&net_device->
queue_sends[q_idx]) < 1)