Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--  drivers/net/hyperv/hyperv_net.h    208
-rw-r--r--  drivers/net/hyperv/netvsc.c        491
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c    138
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c    523
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  345
5 files changed, 1258 insertions, 447 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index abda736e7c7d..dd5919ec408b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/hyperv.h>
#include <linux/rndis.h>
+#include <linux/jhash.h>
/* RSS related */
#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
@@ -105,9 +106,43 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
u32 processor_masks_entry_size;
};
-/* Fwd declaration */
-struct ndis_tcp_ip_checksum_info;
-struct ndis_pkt_8021q_info;
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_pkt_8021q_info {
+ union {
+ struct {
+ u32 pri:3; /* User Priority */
+ u32 cfi:1; /* Canonical Format ID */
+ u32 vlanid:12; /* VLAN ID */
+ u32 reserved:16;
+ };
+ u32 value;
+ };
+};
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -130,6 +165,7 @@ struct hv_netvsc_packet {
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
+ struct hv_dma_range *dma_range;
};
#define NETVSC_HASH_KEYLEN 40
@@ -194,13 +230,15 @@ int netvsc_send(struct net_device *net,
struct sk_buff *skb,
bool xdp_tx);
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp);
+ struct rndis_message *resp,
+ void *data, u32 data_buflen);
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan);
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev);
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp);
unsigned int netvsc_xdp_fraglen(unsigned int len);
@@ -210,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netvsc_device *nvdev);
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog);
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags);
int rndis_set_subchannel(struct net_device *ndev,
struct netvsc_device *nvdev,
@@ -234,7 +274,7 @@ int rndis_filter_receive(struct net_device *ndev,
int rndis_filter_set_device_mac(struct netvsc_device *ndev,
const char *mac);
-void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+int netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
@@ -847,6 +887,30 @@ struct nvsp_message {
#define NETVSC_XDP_HDRM 256
+#define NETVSC_MIN_OUT_MSG_SIZE (sizeof(struct vmpacket_descriptor) + \
+ sizeof(struct nvsp_message))
+#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+
+/* Estimated requestor size:
+ * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
+ */
+static inline u32 netvsc_rqstor_size(unsigned long ringbytes)
+{
+ return ringbytes / NETVSC_MIN_OUT_MSG_SIZE +
+ ringbytes / NETVSC_MIN_IN_MSG_SIZE;
+}
+
+/* XFER PAGE packets can specify a maximum of 375 ranges for NDIS >= 6.0
+ * and a maximum of 64 ranges for NDIS < 6.0 with no RSC; with RSC, this
+ * limit is raised to 562 (= NVSP_RSC_MAX).
+ */
+#define NETVSC_MAX_XFER_PAGE_RANGES NVSP_RSC_MAX
+#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
+ (offsetof(struct vmtransfer_page_packet_header, ranges) + \
+ (rng_cnt) * sizeof(struct vmtransfer_page_range))
+#define NETVSC_MAX_PKT_SIZE (NETVSC_XFER_HEADER_SIZE(NETVSC_MAX_XFER_PAGE_RANGES) + \
+ sizeof(struct nvsp_message) + (sizeof(u32) * VRSS_SEND_TAB_SIZE))
+
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
@@ -867,9 +931,10 @@ struct multi_recv_comp {
#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */
struct nvsc_rsc {
- const struct ndis_pkt_8021q_info *vlan;
- const struct ndis_tcp_ip_checksum_info *csum_info;
- const u32 *hash_info;
+ struct ndis_pkt_8021q_info vlan;
+ struct ndis_tcp_ip_checksum_info csum_info;
+ u32 hash_info;
+ u8 ppi_flags; /* valid/present bits for the above PPIs */
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
@@ -877,12 +942,25 @@ struct nvsc_rsc {
u32 len[NVSP_RSC_MAX];
};
-struct netvsc_stats {
+#define NVSC_RSC_VLAN BIT(0) /* valid/present bit for 'vlan' */
+#define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */
+#define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */
+
+struct netvsc_stats_tx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_xmit;
+ struct u64_stats_sync syncp;
+};
+
+struct netvsc_stats_rx {
u64 packets;
u64 bytes;
u64 broadcast;
u64 multicast;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
struct u64_stats_sync syncp;
};
@@ -897,6 +975,7 @@ struct netvsc_ethtool_stats {
unsigned long rx_no_memory;
unsigned long stop_queue;
unsigned long wake_queue;
+ unsigned long vlan_error;
};
struct netvsc_ethtool_pcpu_stats {
@@ -972,15 +1051,69 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+ /* completion variable to confirm vf association */
+ struct completion vf_add;
+ /* Is the current data path through the VF NIC? */
+ bool data_path_is_vf;
/* Used to temporarily save the config info across hibernation */
struct netvsc_device_info *saved_netvsc_dev_info;
};
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
+ */
+static inline u32 netvsc_get_hash(struct sk_buff *skb,
+ const struct net_device_context *ndc)
+{
+ struct flow_keys flow;
+ u32 hash, pkt_proto = 0;
+ static u32 hashrnd __read_mostly;
+
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ return 0;
+
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
+ return skb_get_hash(skb);
+ } else {
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+ return 0;
+
+ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+}
+
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
struct netvsc_device *net_device;
+ void *recv_buf; /* buffer to copy packets out from the receive buffer */
const struct vmpacket_descriptor *desc;
struct napi_struct napi;
struct multi_send_data msd;
@@ -990,9 +1123,10 @@ struct netvsc_channel {
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ bool xdp_flush;
- struct netvsc_stats tx_stats;
- struct netvsc_stats rx_stats;
+ struct netvsc_stats_tx tx_stats;
+ struct netvsc_stats_rx rx_stats;
};
/* Per netvsc device */
@@ -1005,15 +1139,18 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
+ void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
- u32 recv_buf_gpadl_handle;
+ struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt;
/* Send buffer allocated by us */
void *send_buf;
- u32 send_buf_gpadl_handle;
+ void *send_original_buf;
+ u32 send_buf_size;
+ struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
u32 send_section_size;
unsigned long *send_section_map;
@@ -1101,6 +1238,7 @@ struct rndis_set_request {
u32 info_buflen;
u32 info_buf_offset;
u32 dev_vc_handle;
+ u8 info_buf[];
};
/* Response to NdisSetRequest */
@@ -1213,18 +1351,6 @@ struct rndis_pktinfo_id {
u16 pkt_id;
};
-struct ndis_pkt_8021q_info {
- union {
- struct {
- u32 pri:3; /* User Priority */
- u32 cfi:1; /* Canonical Format ID */
- u32 vlanid:12; /* VLAN ID */
- u32 reserved:16;
- };
- u32 value;
- };
-};
-
struct ndis_object_header {
u8 type;
u8 revision;
@@ -1415,32 +1541,6 @@ struct ndis_offload_params {
};
};
-struct ndis_tcp_ip_checksum_info {
- union {
- struct {
- u32 is_ipv4:1;
- u32 is_ipv6:1;
- u32 tcp_checksum:1;
- u32 udp_checksum:1;
- u32 ip_header_checksum:1;
- u32 reserved:11;
- u32 tcp_header_offset:10;
- } transmit;
- struct {
- u32 tcp_checksum_failed:1;
- u32 udp_checksum_failed:1;
- u32 ip_checksum_failed:1;
- u32 tcp_checksum_succeeded:1;
- u32 udp_checksum_succeeded:1;
- u32 ip_checksum_succeeded:1;
- u32 loopback:1;
- u32 tcp_checksum_value_invalid:1;
- u32 ip_checksum_value_invalid:1;
- } receive;
- u32 value;
- };
-};
-
struct ndis_tcp_lso_info {
union {
struct {
@@ -1694,4 +1794,10 @@ struct rndis_message {
#define TRANSPORT_INFO_IPV6_TCP 0x10
#define TRANSPORT_INFO_IPV6_UDP 0x20
+#define RETRY_US_LO 5000
+#define RETRY_US_HI 10000
+#define RETRY_MAX 2000 /* >10 sec */
+
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet);
#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1b320bcf150a..9352dad58996 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -20,8 +20,10 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
+#include <linux/filter.h>
#include <asm/sync_bitops.h>
+#include <asm/mshyperv.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
@@ -30,12 +32,17 @@
* Switch the data path from the synthetic interface to the VF
* interface.
*/
-void netvsc_switch_datapath(struct net_device *ndev, bool vf)
+int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+ int ret, retry = 0;
+
+ /* Block sending traffic to VF if it's about to be gone */
+ if (!vf)
+ net_device_ctx->data_path_is_vf = vf;
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
@@ -46,12 +53,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
init_pkt->msg.v4_msg.active_dp.active_datapath =
NVSP_DATAPATH_SYNTHETIC;
+again:
trace_nvsp_send(ndev, init_pkt);
- vmbus_sendpacket(dev->channel, init_pkt,
+ ret = vmbus_sendpacket(dev->channel, init_pkt,
sizeof(struct nvsp_message),
- (unsigned long)init_pkt,
- VM_PKT_DATA_INBAND, 0);
+ (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
+ /* If failed to switch to/from VF, let data_path_is_vf stay false,
+ * so we use synthetic path to send data.
+ */
+ if (ret) {
+ if (ret != -EAGAIN) {
+ netdev_err(ndev,
+ "Unable to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (retry++ < RETRY_MAX) {
+ usleep_range(RETRY_US_LO, RETRY_US_HI);
+ goto again;
+ } else {
+ netdev_err(
+ ndev,
+ "Retry failed to send sw datapath msg, err: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ wait_for_completion(&nv_dev->channel_init_wait);
+ net_device_ctx->data_path_is_vf = vf;
+
+ return 0;
}
/* Worker to setup sub channels on initial setup
@@ -118,12 +154,22 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
- kfree(nvdev->send_section_map);
+
+ if (nvdev->recv_original_buf)
+ vfree(nvdev->recv_original_buf);
+ else
+ vfree(nvdev->recv_buf);
+
+ if (nvdev->send_original_buf)
+ vfree(nvdev->send_original_buf);
+ else
+ vfree(nvdev->send_buf);
+
+ bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
+ kfree(nvdev->chan_table[i].recv_buf);
vfree(nvdev->chan_table[i].mrc.slots);
}
@@ -163,7 +209,7 @@ static void netvsc_revoke_recv_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revoke_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
* ignore the failure since we cannot send on a rescinded
@@ -213,7 +259,7 @@ static void netvsc_revoke_send_buf(struct hv_device *device,
ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
- (unsigned long)revoke_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
/* If the failure is because the channel is rescinded;
@@ -242,9 +288,9 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
{
int ret;
- if (net_device->recv_buf_gpadl_handle) {
+ if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
- net_device->recv_buf_gpadl_handle);
+ &net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -254,7 +300,6 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
"unable to teardown receive buffer's gpadl\n");
return;
}
- net_device->recv_buf_gpadl_handle = 0;
}
}
@@ -264,9 +309,9 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
{
int ret;
- if (net_device->send_buf_gpadl_handle) {
+ if (net_device->send_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
- net_device->send_buf_gpadl_handle);
+ &net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -276,7 +321,6 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
"unable to teardown send buffer's gpadl\n");
return;
}
- net_device->send_buf_gpadl_handle = 0;
}
}
@@ -302,8 +346,8 @@ static int netvsc_init_buf(struct hv_device *device,
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
- size_t map_words;
- int ret = 0;
+ int i, ret = 0;
+ void *vaddr;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -339,12 +383,23 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
- gpadl_handle = net_device->recv_buf_gpadl_handle;
+ gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
@@ -388,10 +443,30 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
- /* Setup receive completion ring */
- net_device->recv_completion_cnt
- = round_up(net_device->recv_section_cnt + 1,
- PAGE_SIZE / sizeof(u64));
+ /* Ensure buffer will not overflow */
+ if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
+ (u64)net_device->recv_section_cnt > (u64)buf_size) {
+ netdev_err(ndev, "invalid recv_section_size %u\n",
+ net_device->recv_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
+
+ nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
+ if (nvchan->recv_buf == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ /* Setup receive completion ring.
+ * Add 1 to the recv_section_cnt because at least one entry in a
+ * ring buffer has to be empty.
+ */
+ net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret)
goto cleanup;
@@ -407,6 +482,7 @@ static int netvsc_init_buf(struct hv_device *device,
ret = -ENOMEM;
goto cleanup;
}
+ net_device->send_buf_size = buf_size;
/* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
@@ -421,12 +497,23 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->send_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
- net_device->send_buf_gpadl_handle;
+ net_device->send_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
trace_nvsp_send(ndev, init_packet);
@@ -459,6 +546,12 @@ static int netvsc_init_buf(struct hv_device *device,
/* Parse the response */
net_device->send_section_size = init_packet->msg.
v1_msg.send_send_buf_complete.section_size;
+ if (net_device->send_section_size < NETVSC_MTU_MIN) {
+ netdev_err(ndev, "invalid send_section_size %u\n",
+ net_device->send_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
/* Section count is simply the size divided by the section size. */
net_device->send_section_cnt = buf_size / net_device->send_section_size;
@@ -467,10 +560,9 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->send_section_size, net_device->send_section_cnt);
/* Setup state for managing the send buffer. */
- map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
-
- net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
- if (net_device->send_section_map == NULL) {
+ net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
+ GFP_KERNEL);
+ if (!net_device->send_section_map) {
ret = -ENOMEM;
goto cleanup;
}
@@ -528,7 +620,10 @@ static int negotiate_nvsp_ver(struct hv_device *device,
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
- init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+ if (hv_is_isolation_supported())
+ netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
+ else
+ init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
/* Teaming bit is needed to receive link speed updates */
init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
@@ -541,7 +636,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)init_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
return ret;
@@ -575,6 +670,13 @@ static int netvsc_connect_vsp(struct hv_device *device,
goto cleanup;
}
+ if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
+ netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
+ net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
+ ret = -EPROTO;
+ goto cleanup;
+ }
+
pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
/* Send the ndis version */
@@ -598,7 +700,7 @@ static int netvsc_connect_vsp(struct hv_device *device,
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
- (unsigned long)init_packet,
+ VMBUS_RQST_ID_NO_RESPONSE,
VM_PKT_DATA_INBAND, 0);
if (ret != 0)
goto cleanup;
@@ -635,9 +737,12 @@ void netvsc_device_remove(struct hv_device *device)
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- /* And disassociate NAPI context from device */
- for (i = 0; i < net_device->num_chn; i++)
+ /* Disable NAPI and disassociate its context from the device. */
+ for (i = 0; i < net_device->num_chn; i++) {
+ /* See also vmbus_reset_channel_cb(). */
+ napi_disable(&net_device->chan_table[i].napi);
netif_napi_del(&net_device->chan_table[i].napi);
+ }
/*
* At this point, no one should be accessing net_device
@@ -657,6 +762,12 @@ void netvsc_device_remove(struct hv_device *device)
netvsc_teardown_send_gpadl(device, net_device, ndev);
}
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
/* Release all resources */
free_netvsc_device_rcu(net_device);
}
@@ -676,17 +787,26 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct sk_buff *skb;
u16 q_idx = 0;
int queue_sends;
+ u64 cmd_rqst;
+
+ cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
+ return;
+ }
+
+ skb = (struct sk_buff *)(unsigned long)cmd_rqst;
/* Notify the layer above us */
if (likely(skb)) {
- const struct hv_netvsc_packet *packet
+ struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
- struct netvsc_stats *tx_stats;
+ struct netvsc_stats_tx *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
@@ -699,6 +819,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}
@@ -726,13 +847,74 @@ static void netvsc_send_completion(struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
- const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+ const struct nvsp_message *nvsp_packet;
+ u32 msglen = hv_pkt_datalen(desc);
+ struct nvsp_message *pkt_rqst;
+ u64 cmd_rqst;
+
+ /* First check if this is a VMBUS completion without data payload */
+ if (!msglen) {
+ cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
+ desc->trans_id);
+ if (cmd_rqst == VMBUS_RQST_ERROR) {
+ netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
+ return;
+ }
+ pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
+ switch (pkt_rqst->hdr.msg_type) {
+ case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
+ complete(&net_device->channel_init_wait);
+ break;
+
+ default:
+ netdev_err(ndev, "Unexpected VMBUS completion!!\n");
+ }
+ return;
+ }
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
+ return;
+ }
+
+ nvsp_packet = hv_pkt_data(desc);
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_message_init_complete)) {
+ netdev_err(ndev, "nvsp_msg length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG5_TYPE_SUBCHANNEL:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_subchannel_complete)) {
+ netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
+ msglen);
+ return;
+ }
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
@@ -790,7 +972,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}
for (i = 0; i < page_count; i++) {
- char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+ char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
u32 offset = pb[i].offset;
u32 len = pb[i].len;
@@ -802,6 +984,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
memset(dest, 0, padding);
}
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return;
+
+ if (!packet->dma_range)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ packet->dma_range[i].mapping_size,
+ DMA_TO_DEVICE);
+
+ kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
+ * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
+ * VM.
+ *
+ * In isolation VM, netvsc send buffer has been marked visible to
+ * host and so the data copied to send buffer doesn't need to use
+ * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
+ * may not be copied to send buffer and so these pages need to be
+ * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
+ * that. The pfns in the struct hv_page_buffer need to be converted
+ * to bounce buffer's pfn. The loop here is necessary because the
+ * entries in the page buffer array are not necessarily full
+ * pages of data. Each entry in the array has a separate offset and
+ * len that may be non-zero, even for entries in the middle of the
+ * array. And the entries are not physically contiguous. So each
+ * entry must be individually mapped rather than as a contiguous unit.
+ * So we do not use dma_map_sg() here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ dma_addr_t dma;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return 0;
+
+ packet->dma_range = kcalloc(page_count,
+ sizeof(*packet->dma_range),
+ GFP_KERNEL);
+ if (!packet->dma_range)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ + pb[i].offset);
+ u32 len = pb[i].len;
+
+ dma = dma_map_single(&hv_dev->device, src, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hv_dev->device, dma)) {
+ kfree(packet->dma_range);
+ return -ENOMEM;
+ }
+
+ /* pb[].offset and pb[].len are not changed during dma mapping
+ * and so are not reassigned.
+ */
+ packet->dma_range[i].dma = dma;
+ packet->dma_range[i].mapping_size = len;
+ pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -822,6 +1086,7 @@ static inline int netvsc_send_pkt(
int ret;
u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
+ memset(&nvmsg, 0, sizeof(struct nvsp_message));
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb)
rpkt->channel_type = 0; /* 0 is RMC_DATA */
@@ -841,14 +1106,24 @@ static inline int netvsc_send_pkt(
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
+ packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
+ ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ if (ret) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);
+
+ if (ret)
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
@@ -856,6 +1131,7 @@ static inline int netvsc_send_pkt(
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
+exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
@@ -898,6 +1174,26 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
}
/* RCU already held by caller */
+/* Batching/bouncing logic is designed to attempt to optimize
+ * performance.
+ *
+ * For small, non-LSO packets we copy the packet to a send buffer
+ * which is pre-registered with the Hyper-V side. This enables the
+ * hypervisor to avoid remapping the aperture to access the packet
+ * descriptor and data.
+ *
+ * If we already started using a buffer and the netdev is transmitting
+ * a burst of packets, keep on copying into the buffer until it is
+ * full or we are done collecting a burst. If there is an existing
+ * buffer with space for the RNDIS descriptor but not the packet, copy
+ * the RNDIS descriptor to the buffer, keeping the packet in place.
+ *
+ * If we do batching and send more than one packet using a single
+ * NetVSC message, free the SKBs of the packets copied, except for the
+ * last packet. This is done to streamline the handling of the case
+ * where the last packet only had the RNDIS descriptor copied to the
+ * send buffer, with the data pointers included in the NetVSC message.
+ */
int netvsc_send(struct net_device *ndev,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
@@ -1113,19 +1409,28 @@ static void enq_receive_complete(struct net_device *ndev,
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct netvsc_channel *nvchan,
- const struct vmpacket_descriptor *desc,
- const struct nvsp_message *nvsp)
+ const struct vmpacket_descriptor *desc)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = nvchan->channel;
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
+ const struct nvsp_message *nvsp = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "invalid nvsp header, length too small: %u\n",
+ msglen);
+ return 0;
+ }
+
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
@@ -1134,6 +1439,14 @@ static int netvsc_receive(struct net_device *ndev,
return 0;
}
+ /* Validate xfer page pkt header */
+ if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page pkt, offset too small: %u\n",
+ desc->offset8 << 3);
+ return 0;
+ }
+
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
netif_err(net_device_ctx, rx_err, ndev,
"Invalid xfer page set id - expecting %x got %x\n",
@@ -1144,6 +1457,14 @@ static int netvsc_receive(struct net_device *ndev,
count = vmxferpage_packet->range_cnt;
+ /* Check count for a valid value */
+ if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Range count is not valid: %d\n",
+ count);
+ return 0;
+ }
+
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
u32 offset = vmxferpage_packet->ranges[i].byte_offset;
@@ -1151,7 +1472,8 @@ static int netvsc_receive(struct net_device *ndev,
void *data;
int ret;
- if (unlikely(offset + buflen > net_device->recv_buf_size)) {
+ if (unlikely(offset > net_device->recv_buf_size ||
+ buflen > net_device->recv_buf_size - offset)) {
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
netif_err(net_device_ctx, rx_err, ndev,
@@ -1161,6 +1483,19 @@ static int netvsc_receive(struct net_device *ndev,
continue;
}
+ /* We're going to copy (sections of) the packet into nvchan->recv_buf;
+ * make sure that nvchan->recv_buf is large enough to hold the packet.
+ */
+ if (unlikely(buflen > net_device->recv_section_size)) {
+ nvchan->rsc.cnt = 0;
+ status = NVSP_STAT_FAIL;
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Packet too big: buflen=%u recv_section_size=%u\n",
+ buflen, net_device->recv_section_size);
+
+ continue;
+ }
+
data = recv_buf + offset;
nvchan->rsc.is_last = (i == count - 1);
@@ -1171,8 +1506,11 @@ static int netvsc_receive(struct net_device *ndev,
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
- if (unlikely(ret != NVSP_STAT_SUCCESS))
+ if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
+ }
}
enq_receive_complete(ndev, net_device, q_idx,
@@ -1190,6 +1528,13 @@ static void netvsc_send_table(struct net_device *ndev,
u32 count, offset, *tab;
int i;
+ /* Ensure packet is big enough to read send_table fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_send_indirect_table)) {
+ netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
+ return;
+ }
+
count = nvmsg->msg.v5_msg.send_table.count;
offset = nvmsg->msg.v5_msg.send_table.offset;
@@ -1208,7 +1553,7 @@ static void netvsc_send_table(struct net_device *ndev,
sizeof(union nvsp_6_message_uber);
/* Boundary check for all versions */
- if (offset > msglen - count * sizeof(u32)) {
+ if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
netdev_err(ndev, "Received send-table offset too big:%u\n",
offset);
return;
@@ -1221,12 +1566,24 @@ static void netvsc_send_table(struct net_device *ndev,
}
static void netvsc_send_vf(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ /* Ensure packet is big enough to read its fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_4_send_vf_association)) {
+ netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
+ return;
+ }
+
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+
+ if (net_device_ctx->vf_alloc)
+ complete(&net_device_ctx->vf_add);
+
netdev_info(ndev, "VF slot %u %s\n",
net_device_ctx->vf_serial,
net_device_ctx->vf_alloc ? "added" : "removed");
@@ -1234,16 +1591,27 @@ static void netvsc_send_vf(struct net_device *ndev,
static void netvsc_receive_inband(struct net_device *ndev,
struct netvsc_device *nvscdev,
- const struct nvsp_message *nvmsg,
- u32 msglen)
+ const struct vmpacket_descriptor *desc)
{
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
+ return;
+ }
+
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(ndev, nvmsg);
+ if (hv_is_isolation_supported())
+ netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
+ else
+ netvsc_send_vf(ndev, nvmsg, msglen);
break;
}
}
@@ -1257,23 +1625,19 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
- u32 msglen = hv_pkt_datalen(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(ndev, net_device, channel,
- desc, budget);
+ netvsc_send_completion(ndev, net_device, channel, desc, budget);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- return netvsc_receive(ndev, net_device, nvchan,
- desc, nvmsg);
- break;
+ return netvsc_receive(ndev, net_device, nvchan, desc);
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
+ netvsc_receive_inband(ndev, net_device, desc);
break;
default:
@@ -1311,12 +1675,17 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (!nvchan->desc)
nvchan->desc = hv_pkt_iter_first(channel);
+ nvchan->xdp_flush = false;
+
while (nvchan->desc && work_done < budget) {
work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
+ if (nvchan->xdp_flush)
+ xdp_do_flush();
+
/* Send any pending receive completions */
ret = send_recv_completions(ndev, net_device, nvchan);
@@ -1397,7 +1766,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
u64_stats_init(&nvchan->tx_stats.syncp);
u64_stats_init(&nvchan->rx_stats.syncp);
- ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+ ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
if (ret) {
netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
@@ -1414,10 +1783,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
}
/* Enable NAPI handler before init callbacks */
- netif_napi_add(ndev, &net_device->chan_table[0].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
/* Open the channel */
+ device->channel->next_request_id_callback = vmbus_next_request_id;
+ device->channel->request_addr_callback = vmbus_request_addr;
+ device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
@@ -1458,6 +1831,12 @@ cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index b86611041db6..4a9522689fa4 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
@@ -23,11 +24,13 @@
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
struct xdp_buff *xdp)
{
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
void *data = nvchan->rsc.data[0];
u32 len = nvchan->rsc.len[0];
struct page *page = NULL;
struct bpf_prog *prog;
u32 act = XDP_PASS;
+ bool drop = true;
xdp->data_hard_start = NULL;
@@ -37,6 +40,12 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
if (!prog)
goto out;
+ /* Ensure that the below memcpy() won't overflow the page buffer. */
+ if (len > ndev->mtu + ETH_HLEN) {
+ act = XDP_DROP;
+ goto out;
+ }
+
/* allocate page buffer for data */
page = alloc_page(GFP_ATOMIC);
if (!page) {
@@ -44,12 +53,8 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
goto out;
}
- xdp->data_hard_start = page_address(page);
- xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &nvchan->xdp_rxq;
- xdp->handle = 0;
+ xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
+ xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);
memcpy(xdp->data, data, len);
@@ -58,21 +63,46 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
switch (act) {
case XDP_PASS:
case XDP_TX:
+ drop = false;
+ break;
+
case XDP_DROP:
break;
+ case XDP_REDIRECT:
+ if (!xdp_do_redirect(ndev, xdp, prog)) {
+ nvchan->xdp_flush = true;
+ drop = false;
+
+ u64_stats_update_begin(&rx_stats->syncp);
+
+ rx_stats->xdp_redirect++;
+ rx_stats->packets++;
+ rx_stats->bytes += nvchan->rsc.pktlen;
+
+ u64_stats_update_end(&rx_stats->syncp);
+
+ break;
+ } else {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->xdp_drop++;
+ u64_stats_update_end(&rx_stats->syncp);
+ }
+
+ fallthrough;
+
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
}
out:
rcu_read_unlock();
- if (page && act != XDP_PASS && act != XDP_TX) {
+ if (page && drop) {
__free_page(page);
xdp->data_hard_start = NULL;
}
@@ -135,7 +165,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
- bpf_op_t ndo_bpf;
int ret;
ASSERT_RTNL();
@@ -143,8 +172,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
if (!vf_netdev)
return 0;
- ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
- if (!ndo_bpf)
+ if (!vf_netdev->netdev_ops->ndo_bpf)
return 0;
memset(&xdp, 0, sizeof(xdp));
@@ -155,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = ndo_bpf(vf_netdev, &xdp);
+ ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
@@ -163,16 +191,6 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
return ret;
}
-static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
-{
- struct bpf_prog *prog = netvsc_xdp_get(nvdev);
-
- if (prog)
- return prog->aux->id;
-
- return 0;
-}
-
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct net_device_context *ndevctx = netdev_priv(dev);
@@ -182,12 +200,7 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
int ret;
if (!nvdev || nvdev->destroy) {
- if (bpf->command == XDP_QUERY_PROG) {
- bpf->prog_id = 0;
- return 0; /* Query must always succeed */
- } else {
- return -ENODEV;
- }
+ return -ENODEV;
}
switch (bpf->command) {
@@ -208,11 +221,72 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return ret;
- case XDP_QUERY_PROG:
- bpf->prog_id = netvsc_xdp_query(nvdev);
- return 0;
-
default:
return -EINVAL;
}
}
+
+static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
+ struct xdp_frame *frame, u16 q_idx)
+{
+ struct sk_buff *skb;
+
+ skb = xdp_build_skb_from_frame(frame, ndev);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ netvsc_get_hash(skb, netdev_priv(ndev));
+
+ skb_record_rx_queue(skb, q_idx);
+
+ netvsc_xdp_xmit(skb, ndev);
+
+ return 0;
+}
+
+int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ const struct net_device_ops *vf_ops;
+ struct netvsc_stats_tx *tx_stats;
+ struct netvsc_device *nvsc_dev;
+ struct net_device *vf_netdev;
+ int i, count = 0;
+ u16 q_idx;
+
+ /* Don't transmit if netvsc_device is gone */
+ nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
+ if (unlikely(!nvsc_dev || nvsc_dev->destroy))
+ return 0;
+
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.
+ */
+ vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
+ vf_netdev->netdev_ops->ndo_xdp_xmit &&
+ ndev_ctx->data_path_is_vf) {
+ vf_ops = vf_netdev->netdev_ops;
+ return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
+ }
+
+ q_idx = smp_processor_id() % ndev->real_num_tx_queues;
+
+ for (i = 0; i < n; i++) {
+ if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
+ break;
+
+ count++;
+ }
+
+ tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->xdp_xmit += count;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ return count;
+}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2c0a24c606fc..89eb4f179a3c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
@@ -37,9 +38,6 @@
#include "hyperv_net.h"
#define RING_SIZE_MIN 64
-#define RETRY_US_LO 5000
-#define RETRY_US_HI 10000
-#define RETRY_MAX 2000 /* >10 sec */
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
@@ -244,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg,
return ppi + 1;
}
-/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
- * packets. We can use ethtool to change UDP hash level when necessary.
- */
-static inline u32 netvsc_get_hash(
- struct sk_buff *skb,
- const struct net_device_context *ndc)
-{
- struct flow_keys flow;
- u32 hash, pkt_proto = 0;
- static u32 hashrnd __read_mostly;
-
- net_get_random_once(&hashrnd, sizeof(hashrnd));
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
- return 0;
-
- switch (flow.basic.ip_proto) {
- case IPPROTO_TCP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_TCP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_TCP6_L4HASH;
-
- break;
-
- case IPPROTO_UDP:
- if (flow.basic.n_proto == htons(ETH_P_IP))
- pkt_proto = HV_UDP4_L4HASH;
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- pkt_proto = HV_UDP6_L4HASH;
-
- break;
- }
-
- if (pkt_proto & ndc->l4_hash) {
- return skb_get_hash(skb);
- } else {
- if (flow.basic.n_proto == htons(ETH_P_IP))
- hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
- else if (flow.basic.n_proto == htons(ETH_P_IPV6))
- hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
- else
- return 0;
-
- __skb_set_sw_hash(skb, hash, false);
- }
-
- return hash;
-}
-
static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sk_buff *skb, int old_idx)
{
@@ -367,38 +315,35 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
rcu_read_unlock();
- while (unlikely(txq >= ndev->real_num_tx_queues))
+ while (txq >= ndev->real_num_tx_queues)
txq -= ndev->real_num_tx_queues;
return txq;
}
-static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
struct hv_page_buffer *pb)
{
int j = 0;
- /* Deal with compound pages by ignoring unused part
- * of the page.
- */
- page += (offset >> PAGE_SHIFT);
- offset &= ~PAGE_MASK;
+ hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+ offset = offset & ~HV_HYP_PAGE_MASK;
while (len > 0) {
unsigned long bytes;
- bytes = PAGE_SIZE - offset;
+ bytes = HV_HYP_PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
- pb[j].pfn = page_to_pfn(page);
+ pb[j].pfn = hvpfn;
pb[j].offset = offset;
pb[j].len = bytes;
offset += bytes;
len -= bytes;
- if (offset == PAGE_SIZE && len) {
- page++;
+ if (offset == HV_HYP_PAGE_SIZE && len) {
+ hvpfn++;
offset = 0;
j++;
}
@@ -421,23 +366,26 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_page(hdr),
- offset_in_page(hdr),
- len, &pb[slots_used]);
+ slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+ offset_in_hvpage(hdr),
+ len,
+ &pb[slots_used]);
packet->rmsg_size = len;
packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_page(data),
- offset_in_page(data),
- skb_headlen(skb), &pb[slots_used]);
+ slots_used += fill_pg_buf(virt_to_hvpfn(data),
+ offset_in_hvpage(data),
+ skb_headlen(skb),
+ &pb[slots_used]);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- slots_used += fill_pg_buf(skb_frag_page(frag),
- skb_frag_off(frag),
- skb_frag_size(frag), &pb[slots_used]);
+ slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ &pb[slots_used]);
}
return slots_used;
}
@@ -453,8 +401,8 @@ static int count_skb_frag_slots(struct sk_buff *skb)
unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
- offset &= ~PAGE_MASK;
- pages += PFN_UP(offset + size);
+ offset &= ~HV_HYP_PAGE_MASK;
+ pages += HVPFN_UP(offset + size);
}
return pages;
}
@@ -462,12 +410,12 @@ static int count_skb_frag_slots(struct sk_buff *skb)
static int netvsc_get_slots(struct sk_buff *skb)
{
char *data = skb->data;
- unsigned int offset = offset_in_page(data);
+ unsigned int offset = offset_in_hvpage(data);
unsigned int len = skb_headlen(skb);
int slots;
int frag_slots;
- slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
frag_slots = count_skb_frag_slots(skb);
return slots + frag_slots;
}
@@ -502,7 +450,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
int rc;
skb->dev = vf_netdev;
- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
+ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
@@ -532,12 +480,14 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
- /* if VF is present and up then redirect packets
- * already called with rcu_read_lock_bh
+ /* If VF is present and up then redirect packets to it.
+ * Skip the VF if it is marked down or has no carrier.
+ * If netpoll is in use, then the VF cannot be used either.
*/
vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
if (vf_netdev && netif_running(vf_netdev) &&
- !netpoll_tx_running(net))
+ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
+ net_device_ctx->data_path_is_vf)
return netvsc_vf_xmit(net, vf_netdev, skb);
/* We will atmost need two pages to describe the rndis
@@ -605,6 +555,29 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
*hash_info = hash;
}
+ /* When using AF_PACKET we need to drop VLAN header from
+ * the frame and update the SKB to allow the HOST OS
+ * to transmit the 802.1Q packet
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ u16 vlan_tci;
+
+ skb_reset_mac_header(skb);
+ if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
+ if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
+ ++net_device_ctx->eth_stats.vlan_error;
+ goto drop;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ /* Update the NDIS header pkt lengths */
+ packet->total_data_buflen -= VLAN_HLEN;
+ packet->total_bytes -= VLAN_HLEN;
+ rndis_msg->msg_len = packet->total_data_buflen;
+ rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
+ }
+ }
+
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
@@ -638,10 +611,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
@@ -710,7 +680,8 @@ no_memory:
goto drop;
}
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
return netvsc_xmit(skb, ndev, false);
}
@@ -719,19 +690,44 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* netvsc_linkstatus_callback - Link up/down notification
*/
void netvsc_linkstatus_callback(struct net_device *net,
- struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data, u32 data_buflen)
{
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device_context *ndev_ctx = netdev_priv(net);
struct netvsc_reconfig *event;
unsigned long flags;
+ /* Ensure the packet is big enough to access its fields */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
+ netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
+ resp->msg_len);
+ return;
+ }
+
+ /* Copy the RNDIS indicate status into nvchan->recv_buf */
+ memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
+
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
- speed = *(u32 *)((void *)indicate
- + indicate->status_buf_offset) / 10000;
+ /* Validate status_buf_offset and status_buflen.
+ *
+ * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
+ * for the status buffer field in resp->msg_len; perform the validation
+ * using data_buflen (>= resp->msg_len).
+ */
+ if (indicate->status_buflen < sizeof(speed) ||
+ indicate->status_buf_offset < sizeof(*indicate) ||
+ data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
+ data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
+ < indicate->status_buflen) {
+ netdev_err(net, "invalid rndis_indicate_status packet\n");
+ return;
+ }
+
+ speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
ndev_ctx->speed = speed;
return;
}
@@ -757,7 +753,8 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
-static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
+/* This function should only be called after skb_record_rx_queue() */
+void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -786,10 +783,11 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct xdp_buff *xdp)
{
struct napi_struct *napi = &nvchan->napi;
- const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
+ const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
- nvchan->rsc.csum_info;
- const u32 *hash_info = nvchan->rsc.hash_info;
+ &nvchan->rsc.csum_info;
+ const u32 *hash_info = &nvchan->rsc.hash_info;
+ u8 ppi_flags = nvchan->rsc.ppi_flags;
struct sk_buff *skb;
void *xbuf = xdp->data_hard_start;
int i;
@@ -797,7 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
if (xbuf) {
unsigned int hdroom = xdp->data - xdp->data_hard_start;
unsigned int xlen = xdp->data_end - xdp->data;
- unsigned int frag_size = netvsc_xdp_fraglen(hdroom + xlen);
+ unsigned int frag_size = xdp->frame_sz;
skb = build_skb(xbuf, frag_size);
@@ -833,22 +831,28 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* We compute it here if the flags are set, because on Linux, the IP
* checksum is always checked.
*/
- if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
csum_info->receive.ip_checksum_succeeded &&
- skb->protocol == htons(ETH_P_IP))
+ skb->protocol == htons(ETH_P_IP)) {
+ /* Check that there is enough space to hold the IP header. */
+ if (skb_headlen(skb) < sizeof(struct iphdr)) {
+ kfree_skb(skb);
+ return NULL;
+ }
netvsc_comp_ipcsum(skb);
+ }
/* Do L4 checksum offload if enabled and present. */
- if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (hash_info && (net->features & NETIF_F_RXHASH))
+ if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
- if (vlan) {
+ if (ppi_flags & NVSC_RSC_VLAN) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
@@ -871,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats = &nvchan->rx_stats;
+ struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
struct xdp_buff xdp;
u32 act;
@@ -880,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net,
act = netvsc_run_xdp(net, nvchan, &xdp);
+ if (act == XDP_REDIRECT)
+ return NVSP_STAT_SUCCESS;
+
if (act != XDP_PASS && act != XDP_TX) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->xdp_drop++;
@@ -904,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net,
* statistics will not work correctly.
*/
u64_stats_update_begin(&rx_stats->syncp);
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+
rx_stats->packets++;
rx_stats->bytes += nvchan->rsc.pktlen;
@@ -925,8 +935,8 @@ int netvsc_recv_callback(struct net_device *net,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void netvsc_get_channels(struct net_device *net,
@@ -1143,23 +1153,6 @@ out:
return ret;
}
-static bool
-netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- diff1.base.speed = 0;
- diff1.base.duplex = 0;
- /* advertising and cmd are usually set */
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.cmd = 0;
- /* We set port to PORT_OTHER */
- diff2.base.port = PORT_OTHER;
-
- return !memcmp(&diff1, &diff2, sizeof(diff1));
-}
-
static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
@@ -1176,6 +1169,12 @@ static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev;
+
+ vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev)
+ return __ethtool_get_link_ksettings(vf_netdev, cmd);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
@@ -1188,18 +1187,18 @@ static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- u32 speed;
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
- speed = cmd->base.speed;
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !netvsc_validate_ethtool_ss_cmd(cmd))
- return -EINVAL;
+ if (vf_netdev) {
+ if (!vf_netdev->ethtool_ops->set_link_ksettings)
+ return -EOPNOTSUPP;
- ndc->speed = speed;
- ndc->duplex = cmd->base.duplex;
+ return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
+ cmd);
+ }
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &ndc->speed, &ndc->duplex);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
@@ -1310,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
/* fetch percpu stats of netvsc */
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_ethtool_pcpu_stats *this_tot =
&pcpu_tot[nvchan->channel->target_cpu];
u64 packets, bytes;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
this_tot->tx_bytes += bytes;
this_tot->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
this_tot->rx_bytes += bytes;
this_tot->rx_packets += packets;
@@ -1363,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net,
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
- const struct netvsc_stats *stats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
u64 packets, bytes, multicast;
unsigned int start;
- stats = &nvchan->tx_stats;
+ tx_stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
- stats = &nvchan->rx_stats;
+ rx_stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- multicast = stats->multicast + stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ multicast = rx_stats->multicast + rx_stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->rx_bytes += bytes;
t->rx_packets += packets;
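
The fetch loops above (and the per-channel updates earlier in netvsc_recv_callback) use the u64_stats seqcount pattern so 64-bit counters can be read consistently on 32-bit hosts without locking. A self-contained sketch of both sides, with hypothetical stat names:

#include <linux/u64_stats_sync.h>

struct example_rx_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side (e.g. NAPI poll): bracket the update with the seqcount. */
static void example_stats_update(struct example_rx_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a snapshot unchanged by any concurrent writer. */
static void example_stats_read(const struct example_rx_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
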
@@ -1440,6 +1441,7 @@ static const struct {
{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+ { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
}, pcpu_stats[] = {
{ "cpu%u_rx_packets",
offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
@@ -1471,8 +1473,8 @@ static const struct {
/* statistics per queue (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
-/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
-#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
+/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
@@ -1499,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
- const struct netvsc_stats *qstats;
+ const struct netvsc_stats_tx *tx_stats;
+ const struct netvsc_stats_rx *rx_stats;
struct netvsc_vf_pcpu_stats sum;
struct netvsc_ethtool_pcpu_stats *pcpu_sum;
unsigned int start;
u64 packets, bytes;
u64 xdp_drop;
+ u64 xdp_redirect;
+ u64 xdp_tx;
+ u64 xdp_xmit;
int i, j, cpu;
if (!nvdev)
@@ -1518,31 +1524,40 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
for (j = 0; j < nvdev->num_chn; j++) {
- qstats = &nvdev->chan_table[j].tx_stats;
+ tx_stats = &nvdev->chan_table[j].tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ xdp_xmit = tx_stats->xdp_xmit;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_xmit;
- qstats = &nvdev->chan_table[j].rx_stats;
+ rx_stats = &nvdev->chan_table[j].rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&qstats->syncp);
- packets = qstats->packets;
- bytes = qstats->bytes;
- xdp_drop = qstats->xdp_drop;
- } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_redirect = rx_stats->xdp_redirect;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_drop;
+ data[i++] = xdp_redirect;
+ data[i++] = xdp_tx;
}
pcpu_sum = kvmalloc_array(num_possible_cpus(),
sizeof(struct netvsc_ethtool_pcpu_stats),
GFP_KERNEL);
+ if (!pcpu_sum)
+ return;
+
netvsc_get_pcpu_stats(dev, pcpu_sum);
for_each_present_cpu(cpu) {
struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
@@ -1566,34 +1581,26 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
- memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ ethtool_sprintf(&p, netvsc_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
- memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
+ ethtool_sprintf(&p, vf_stats[i].name);
for (i = 0; i < nvdev->num_chn; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_xdp_drop", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
+ ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
}
for_each_present_cpu(cpu) {
- for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
- sprintf(p, pcpu_stats[i].name, cpu);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
+ ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
}
break;
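
The per-queue strings emitted here must stay in lockstep with NETVSC_QUEUE_STATS_LEN and with the order in which netvsc_get_ethtool_stats() fills the data array. A hypothetical way of expressing that invariant (illustrative macro names only):

/* 3 tx counters (packets, bytes, xdp_xmit) plus 5 rx counters
 * (packets, bytes, xdp_drop, xdp_redirect, xdp_tx) per queue gives the
 * 8 entries accounted for by NETVSC_QUEUE_STATS_LEN above.
 */
#define EXAMPLE_TX_QSTATS	3
#define EXAMPLE_RX_QSTATS	5
#define EXAMPLE_QUEUE_STATS_LEN(num_chn) \
	((num_chn) * (EXAMPLE_TX_QSTATS + EXAMPLE_RX_QSTATS))
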
@@ -1825,7 +1832,9 @@ static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
}
static void netvsc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@@ -1837,7 +1846,9 @@ static void netvsc_get_ringparam(struct net_device *ndev,
}
static int netvsc_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@@ -1947,6 +1958,23 @@ syncvf:
return ret;
}
+static int netvsc_get_regs_len(struct net_device *netdev)
+{
+ return VRSS_SEND_TAB_SIZE * sizeof(u32);
+}
+
+static void netvsc_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct net_device_context *ndc = netdev_priv(netdev);
+ u32 *regs_buff = p;
+
+ /* increase the version, if buffer format is changed. */
+ regs->version = 1;
+
+ memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
+}
+
static u32 netvsc_get_msglevel(struct net_device *ndev)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1963,6 +1991,8 @@ static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
+ .get_regs_len = netvsc_get_regs_len,
+ .get_regs = netvsc_get_regs,
.get_msglevel = netvsc_get_msglevel,
.set_msglevel = netvsc_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -1998,6 +2028,7 @@ static const struct net_device_ops device_ops = {
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
.ndo_bpf = netvsc_bpf,
+ .ndo_xdp_xmit = netvsc_ndoxdp_xmit,
};
/*
@@ -2011,11 +2042,11 @@ static void netvsc_link_change(struct work_struct *w)
container_of(w, struct net_device_context, dwork.work);
struct hv_device *device_obj = ndev_ctx->device_ctx;
struct net_device *net = hv_get_drvdata(device_obj);
+ unsigned long flags, next_reconfig, delay;
+ struct netvsc_reconfig *event = NULL;
struct netvsc_device *net_device;
struct rndis_device *rdev;
- struct netvsc_reconfig *event = NULL;
- bool notify = false, reschedule = false;
- unsigned long flags, next_reconfig, delay;
+ bool reschedule = false;
/* if changes are happening, comeback later */
if (!rtnl_trylock()) {
@@ -2064,7 +2095,7 @@ static void netvsc_link_change(struct work_struct *w)
netif_carrier_on(net);
netvsc_tx_enable(net_device, net);
} else {
- notify = true;
+ __netdev_notify_peers(net);
}
kfree(event);
break;
@@ -2093,9 +2124,6 @@ static void netvsc_link_change(struct work_struct *w)
rtnl_unlock();
- if (notify)
- netdev_notify_peers(net);
-
/* link_watch only sends one notification with current state per
* second, handle next reconfig event in 2 seconds.
*/
@@ -2249,6 +2277,7 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
struct device *parent = vf_netdev->dev.parent;
struct net_device_context *ndev_ctx;
+ struct net_device *ndev;
struct pci_dev *pdev;
u32 serial;
@@ -2271,8 +2300,29 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
if (!ndev_ctx->vf_alloc)
continue;
- if (ndev_ctx->vf_serial == serial)
- return hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev_ctx->vf_serial != serial)
+ continue;
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ndev->addr_len != vf_netdev->addr_len ||
+ memcmp(ndev->perm_addr, vf_netdev->perm_addr,
+ ndev->addr_len) != 0)
+ continue;
+
+ return ndev;
+
+ }
+
+ /* Fallback path: match the synthetic NIC to the VF by
+ * its permanent MAC address
+ */
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
+ netdev_notice(vf_netdev,
+ "falling back to mac addr based matching\n");
+ return ndev;
+ }
}
netdev_notice(vf_netdev,
@@ -2326,6 +2376,9 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
dev_hold(vf_netdev);
rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+ if (ndev->needed_headroom < vf_netdev->needed_headroom)
+ ndev->needed_headroom = vf_netdev->needed_headroom;
+
vf_netdev->wanted_features = ndev->features;
netdev_update_features(vf_netdev);
@@ -2335,13 +2388,26 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
-/* VF up/down change detected, schedule to change data path */
-static int netvsc_vf_changed(struct net_device *vf_netdev)
+/* Change the data path when VF UP/DOWN/CHANGE are detected.
+ *
+ * Typically a UP or DOWN event is followed by a CHANGE event, so
+ * net_device_ctx->data_path_is_vf is used to cache the current data path
+ * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
+ * message.
+ *
+ * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
+ * interface, there is only the CHANGE event and no UP or DOWN event.
+ */
+static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
struct net_device *ndev;
- bool vf_is_up = netif_running(vf_netdev);
+ bool vf_is_up = false;
+ int ret;
+
+ if (event != NETDEV_GOING_DOWN)
+ vf_is_up = netif_running(vf_netdev);
ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
@@ -2352,9 +2418,25 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (!netvsc_dev)
return NOTIFY_DONE;
- netvsc_switch_datapath(ndev, vf_is_up);
- netdev_info(ndev, "Data path switched %s VF: %s\n",
- vf_is_up ? "to" : "from", vf_netdev->name);
+ if (net_device_ctx->data_path_is_vf == vf_is_up)
+ return NOTIFY_OK;
+
+ if (vf_is_up && !net_device_ctx->vf_alloc) {
+ netdev_info(ndev, "Waiting for the VF association from host\n");
+ wait_for_completion(&net_device_ctx->vf_add);
+ }
+
+ ret = netvsc_switch_datapath(ndev, vf_is_up);
+
+ if (ret) {
+ netdev_err(ndev,
+ "Data path failed to switch %s VF: %s, err: %d\n",
+ vf_is_up ? "to" : "from", vf_netdev->name, ret);
+ return NOTIFY_DONE;
+ } else {
+ netdev_info(ndev, "Data path switched %s VF: %s\n",
+ vf_is_up ? "to" : "from", vf_netdev->name);
+ }
return NOTIFY_OK;
}
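
The NETDEV_GOING_DOWN case above deliberately treats the VF as already down, so the data path moves back to the synthetic NIC before the VF is actually removed. A condensed, hypothetical restatement of that event-to-state mapping (example_vf_target_state is illustrative, not driver code):

#include <linux/netdevice.h>

/* Illustrative only: decide whether traffic should target the VF for a
 * given netdev notifier event. GOING_DOWN maps to "synthetic" even though
 * the VF is still nominally running.
 */
static bool example_vf_target_state(struct net_device *vf_netdev,
				    unsigned long event)
{
	if (event == NETDEV_GOING_DOWN)
		return false;

	return netif_running(vf_netdev);
}
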
@@ -2375,11 +2457,14 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netvsc_vf_setxdp(vf_netdev, NULL);
+ reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
dev_put(vf_netdev);
+ ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
+
return NOTIFY_OK;
}
@@ -2412,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
+ init_completion(&net_device_ctx->vf_add);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
@@ -2424,6 +2510,7 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
@@ -2449,7 +2536,7 @@ static int netvsc_probe(struct hv_device *dev,
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
+ eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2470,6 +2557,8 @@ static int netvsc_probe(struct hv_device *dev,
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
+ netdev_lockdep_set_classes(net);
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -2554,8 +2643,8 @@ static int netvsc_remove(struct hv_device *dev)
static int netvsc_suspend(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
- struct net_device *vf_netdev, *net;
struct netvsc_device *nvdev;
+ struct net_device *net;
int ret;
net = hv_get_drvdata(dev);
@@ -2571,13 +2660,12 @@ static int netvsc_suspend(struct hv_device *dev)
goto out;
}
- vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
- if (vf_netdev)
- netvsc_unregister_vf(vf_netdev);
-
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
-
+ if (!ndev_ctx->saved_netvsc_dev_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
@@ -2595,6 +2683,12 @@ static int netvsc_resume(struct hv_device *dev)
rtnl_lock();
net_device_ctx = netdev_priv(net);
+
+ /* Reset the data path to the netvsc NIC before re-opening the vmbus
+ * channel. Later netvsc_netdev_event() will switch the data path to
+ * the VF upon the UP or CHANGE event.
+ */
+ net_device_ctx->data_path_is_vf = false;
device_info = net_device_ctx->saved_netvsc_dev_info;
ret = netvsc_attach(net, device_info);
@@ -2651,8 +2745,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if ((event_dev->priv_flags & IFF_BONDING) &&
- (event_dev->flags & IFF_MASTER))
+ if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;
switch (event) {
@@ -2662,7 +2755,9 @@ static int netvsc_netdev_event(struct notifier_block *this,
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
case NETDEV_DOWN:
- return netvsc_vf_changed(event_dev);
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ return netvsc_vf_changed(event_dev, event);
default:
return NOTIFY_DONE;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b81ceba38218..eea777ec2541 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -6,6 +6,7 @@
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -19,13 +20,14 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
+#include <linux/string.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
static void rndis_set_multicast(struct work_struct *w);
-#define RNDIS_EXT_LEN PAGE_SIZE
+#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -126,70 +128,89 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct net_device *netdev,
- const struct rndis_message *rndis_msg)
+ const struct rndis_message *rndis_msg,
+ const void *data)
{
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
- "data offset %u data len %u, # oob %u, "
- "oob offset %u, oob len %u, pkt offset %u, "
- "pkt len %u\n",
- rndis_msg->msg_len,
- rndis_msg->msg.pkt.data_offset,
- rndis_msg->msg.pkt.data_len,
- rndis_msg->msg.pkt.num_oob_data_elements,
- rndis_msg->msg.pkt.oob_data_offset,
- rndis_msg->msg.pkt.oob_data_len,
- rndis_msg->msg.pkt.per_pkt_info_offset,
- rndis_msg->msg.pkt.per_pkt_info_len);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
+ const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
+ "data offset %u data len %u, # oob %u, "
+ "oob offset %u, oob len %u, pkt offset %u, "
+ "pkt len %u\n",
+ rndis_msg->msg_len,
+ pkt->data_offset,
+ pkt->data_len,
+ pkt->num_oob_data_elements,
+ pkt->oob_data_offset,
+ pkt->oob_data_len,
+ pkt->per_pkt_info_offset,
+ pkt->per_pkt_info_len);
+ }
break;
case RNDIS_MSG_INIT_C:
- netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
- "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
- "device flags %d, max xfer size 0x%x, max pkts %u, "
- "pkt aligned %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.init_complete.req_id,
- rndis_msg->msg.init_complete.status,
- rndis_msg->msg.init_complete.major_ver,
- rndis_msg->msg.init_complete.minor_ver,
- rndis_msg->msg.init_complete.dev_flags,
- rndis_msg->msg.init_complete.max_xfer_size,
- rndis_msg->msg.init_complete.
- max_pkt_per_msg,
- rndis_msg->msg.init_complete.
- pkt_alignment_factor);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_initialize_complete)) {
+ const struct rndis_initialize_complete *init_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
+ "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
+ "device flags %d, max xfer size 0x%x, max pkts %u, "
+ "pkt aligned %u)\n",
+ rndis_msg->msg_len,
+ init_complete->req_id,
+ init_complete->status,
+ init_complete->major_ver,
+ init_complete->minor_ver,
+ init_complete->dev_flags,
+ init_complete->max_xfer_size,
+ init_complete->max_pkt_per_msg,
+ init_complete->pkt_alignment_factor);
+ }
break;
case RNDIS_MSG_QUERY_C:
- netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
- "(len %u, id 0x%x, status 0x%x, buf len %u, "
- "buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.query_complete.req_id,
- rndis_msg->msg.query_complete.status,
- rndis_msg->msg.query_complete.
- info_buflen,
- rndis_msg->msg.query_complete.
- info_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_query_complete)) {
+ const struct rndis_query_complete *query_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
+ "(len %u, id 0x%x, status 0x%x, buf len %u, "
+ "buf offset %u)\n",
+ rndis_msg->msg_len,
+ query_complete->req_id,
+ query_complete->status,
+ query_complete->info_buflen,
+ query_complete->info_buf_offset);
+ }
break;
case RNDIS_MSG_SET_C:
- netdev_dbg(netdev,
- "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.set_complete.req_id,
- rndis_msg->msg.set_complete.status);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_set_complete)) {
+ const struct rndis_set_complete *set_complete =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->msg_len,
+ set_complete->req_id,
+ set_complete->status);
+ }
break;
case RNDIS_MSG_INDICATE:
- netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
- "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
- rndis_msg->msg_len,
- rndis_msg->msg.indicate_status.status,
- rndis_msg->msg.indicate_status.status_buflen,
- rndis_msg->msg.indicate_status.status_buf_offset);
+ if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
+ sizeof(struct rndis_indicate_status)) {
+ const struct rndis_indicate_status *indicate_status =
+ data + RNDIS_HEADER_SIZE;
+ netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
+ rndis_msg->msg_len,
+ indicate_status->status,
+ indicate_status->status_buflen,
+ indicate_status->status_buf_offset);
+ }
break;
default:
@@ -215,18 +236,17 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf_cnt = 1;
pb[0].pfn = virt_to_phys(&req->request_msg) >>
- PAGE_SHIFT;
+ HV_HYP_PAGE_SHIFT;
pb[0].len = req->request_msg.msg_len;
- pb[0].offset =
- (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+ pb[0].offset = offset_in_hvpage(&req->request_msg);
/* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > PAGE_SIZE) {
+ if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
packet->page_buf_cnt++;
- pb[0].len = PAGE_SIZE -
+ pb[0].len = HV_HYP_PAGE_SIZE -
pb[0].offset;
pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> PAGE_SHIFT;
+ + pb[0].len) >> HV_HYP_PAGE_SHIFT;
pb[1].offset = 0;
pb[1].len = req->request_msg.msg_len -
pb[0].len;
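
The page-buffer setup above describes the request message to the host as (pfn, offset, len) triples in Hyper-V page units, adding a second descriptor when the message straddles a Hyper-V page boundary. A self-contained sketch of that split, assuming the message never spans more than two pages (as in this request path); example_fill_pagebufs is an illustrative name:

#include <linux/hyperv.h>
#include <linux/io.h>

/* Illustrative only: describe a virtually contiguous message with one or two
 * hv_page_buffer entries. Returns the number of entries used.
 */
static int example_fill_pagebufs(void *msg, u32 msg_len,
				 struct hv_page_buffer *pb)
{
	int cnt = 1;

	pb[0].pfn    = virt_to_phys(msg) >> HV_HYP_PAGE_SHIFT;
	pb[0].offset = offset_in_hvpage(msg);
	pb[0].len    = msg_len;

	/* Message runs past the end of the first Hyper-V page: describe the
	 * remainder with a second page buffer.
	 */
	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
		pb[0].len = HV_HYP_PAGE_SIZE - pb[0].offset;
		pb[1].pfn = virt_to_phys(msg + pb[0].len) >> HV_HYP_PAGE_SHIFT;
		pb[1].offset = 0;
		pb[1].len = msg_len - pb[0].len;
		cnt = 2;
	}

	return cnt;
}
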
@@ -246,11 +266,20 @@ static void rndis_set_link_state(struct rndis_device *rdev,
{
u32 link_status;
struct rndis_query_complete *query_complete;
+ u32 msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
+ return;
query_complete = &request->response_msg.msg.query_complete;
if (query_complete->status == RNDIS_STATUS_SUCCESS &&
- query_complete->info_buflen == sizeof(u32)) {
+ query_complete->info_buflen >= sizeof(u32) &&
+ query_complete->info_buf_offset >= sizeof(*query_complete) &&
+ msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ >= query_complete->info_buflen) {
memcpy(&link_status, (void *)((unsigned long)query_complete +
query_complete->info_buf_offset), sizeof(u32));
rdev->link_state = link_status != 0;
@@ -259,8 +288,10 @@ static void rndis_set_link_state(struct rndis_device *rdev,
static void rndis_filter_receive_response(struct net_device *ndev,
struct netvsc_device *nvdev,
- const struct rndis_message *resp)
+ struct rndis_message *resp,
+ void *data)
{
+ u32 *req_id = &resp->msg.init_complete.req_id;
struct rndis_device *dev = nvdev->extension;
struct rndis_request *request = NULL;
bool found = false;
@@ -275,14 +306,26 @@ static void rndis_filter_receive_response(struct net_device *ndev,
return;
}
+ /* Ensure the packet is big enough to read req_id. Req_id is the 1st
+ * field in any request/response message, so the payload should have at
+ * least sizeof(u32) bytes
+ */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
+ netdev_err(ndev, "rndis msg_len too small: %u\n",
+ resp->msg_len);
+ return;
+ }
+
+ /* Copy the request ID into nvchan->recv_buf */
+ *req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);
+
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
/*
* All request/response message contains RequestId as the 1st
* field
*/
- if (request->request_msg.msg.init_req.req_id
- == resp->msg.init_complete.req_id) {
+ if (request->request_msg.msg.init_req.req_id == *req_id) {
found = true;
break;
}
@@ -292,8 +335,11 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (found) {
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
- memcpy(&request->response_msg, resp,
- resp->msg_len);
+ memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
+ unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ data + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+ "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
@@ -317,12 +363,14 @@ static void rndis_filter_receive_response(struct net_device *ndev,
}
}
+ netvsc_dma_unmap(((struct net_device_context *)
+ netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
"no rndis request found for this response "
"(id 0x%x res type 0x%x)\n",
- resp->msg.init_complete.req_id,
+ *req_id,
resp->ndis_msg_type);
}
}
@@ -331,8 +379,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
* Get the Per-Packet-Info with the specified type
* return NULL if not found.
*/
-static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
- u32 type, u8 internal)
+static inline void *rndis_get_ppi(struct net_device *ndev,
+ struct rndis_packet *rpkt,
+ u32 rpkt_len, u32 type, u8 internal,
+ u32 ppi_size, void *data)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -340,13 +390,48 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
if (rpkt->per_pkt_info_offset == 0)
return NULL;
+ /* Validate info_offset and info_len */
+ if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
+ rpkt->per_pkt_info_offset > rpkt_len) {
+ netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
+ rpkt->per_pkt_info_offset);
+ return NULL;
+ }
+
+ if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
+ rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
+ rpkt->per_pkt_info_len);
+ return NULL;
+ }
+
ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
rpkt->per_pkt_info_offset);
+ /* Copy the PPIs into nvchan->recv_buf */
+ memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
len = rpkt->per_pkt_info_len;
while (len > 0) {
- if (ppi->type == type && ppi->internal == internal)
+ /* Validate ppi_offset and ppi_size */
+ if (ppi->size > len) {
+ netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
+ continue;
+ }
+
+ if (ppi->ppi_offset >= ppi->size) {
+ netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
+ continue;
+ }
+
+ if (ppi->type == type && ppi->internal == internal) {
+ /* ppi->size should be big enough to hold the returned object. */
+ if (ppi->size - ppi->ppi_offset < ppi_size) {
+ netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
+ ppi->size, ppi->ppi_offset);
+ continue;
+ }
return (void *)((ulong)ppi + ppi->ppi_offset);
+ }
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
}
@@ -366,10 +451,29 @@ void rsc_add_data(struct netvsc_channel *nvchan,
if (cnt) {
nvchan->rsc.pktlen += len;
} else {
- nvchan->rsc.vlan = vlan;
- nvchan->rsc.csum_info = csum_info;
+ /* The data/values pointed to by vlan, csum_info and hash_info are shared
+ * across the different 'fragments' of the RSC packet; store them into
+ * the packet itself.
+ */
+ if (vlan != NULL) {
+ memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
+ }
+ if (csum_info != NULL) {
+ memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
+ nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
+ }
nvchan->rsc.pktlen = len;
- nvchan->rsc.hash_info = hash_info;
+ if (hash_info != NULL) {
+ nvchan->rsc.hash_info = *hash_info;
+ nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
+ } else {
+ nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
+ }
}
nvchan->rsc.data[cnt] = data;
@@ -381,21 +485,38 @@ static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
struct netvsc_channel *nvchan,
struct rndis_message *msg,
- u32 data_buflen)
+ void *data, u32 data_buflen)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
const u32 *hash_info;
- u32 data_offset;
- void *data;
+ u32 data_offset, rpkt_len;
bool rsc_more = false;
int ret;
+ /* Ensure data_buflen is big enough to read header fields */
+ if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
+ netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
+ data_buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS packet into nvchan->recv_buf */
+ memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));
+
+ /* Validate rndis_pkt offset */
+ if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "invalid rndis packet offset: %u\n",
+ rndis_pkt->data_offset);
+ return NVSP_STAT_FAIL;
+ }
+
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
+ rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
data_buflen -= data_offset;
/*
@@ -410,15 +531,17 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0);
-
- csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
+ data);
- hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
+ sizeof(*csum_info), data);
- pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
+ sizeof(*hash_info), data);
- data = (void *)msg + data_offset;
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
+ sizeof(*pktinfo_id), data);
/* Identify RSC frags, drop erroneous packets */
if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
@@ -447,7 +570,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* the data packet to the stack, without the rndis trailer padding
*/
rsc_add_data(nvchan, vlan, csum_info, hash_info,
- data, rndis_pkt->data_len);
+ data + data_offset, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
@@ -458,8 +581,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return ret;
drop:
- /* Drop incomplete packet */
- nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}
@@ -469,25 +590,41 @@ int rndis_filter_receive(struct net_device *ndev,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct rndis_message *rndis_msg = data;
+ struct rndis_message *rndis_msg = nvchan->recv_buf;
+
+ if (buflen < RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Copy the RNDIS msg header into nvchan->recv_buf */
+ memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);
+
+ /* Validate incoming rndis_message packet */
+ if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
+ buflen < rndis_msg->msg_len) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
+ buflen, rndis_msg->msg_len);
+ return NVSP_STAT_FAIL;
+ }
if (netif_msg_rx_status(net_device_ctx))
- dump_rndis_message(ndev, rndis_msg);
+ dump_rndis_message(ndev, rndis_msg, data);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
- rndis_msg, buflen);
+ rndis_msg, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
/* completion msgs */
- rndis_filter_receive_response(ndev, net_dev, rndis_msg);
+ rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
break;
case RNDIS_MSG_INDICATE:
/* notification msgs */
- netvsc_linkstatus_callback(ndev, rndis_msg);
+ netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
break;
default:
netdev_err(ndev,
@@ -508,6 +645,7 @@ static int rndis_filter_query_device(struct rndis_device *dev,
u32 inresult_size = *result_size;
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
+ u32 msg_len;
int ret = 0;
if (!result)
@@ -575,8 +713,19 @@ static int rndis_filter_query_device(struct rndis_device *dev,
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
+ msg_len = request->response_msg.msg_len;
+
+ /* Ensure the packet is big enough to access its fields */
+ if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
+ ret = -1;
+ goto cleanup;
+ }
- if (query_complete->info_buflen > inresult_size) {
+ if (query_complete->info_buflen > inresult_size ||
+ query_complete->info_buf_offset < sizeof(*query_complete) ||
+ msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
+ msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
+ < query_complete->info_buflen) {
ret = -1;
goto cleanup;
}
@@ -906,10 +1055,8 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
set = &request->request_msg.msg.set_req;
set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
set->info_buflen = sizeof(u32);
- set->info_buf_offset = sizeof(struct rndis_set_request);
-
- memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
- &new_filter, sizeof(u32));
+ set->info_buf_offset = offsetof(typeof(*set), info_buf);
+ memcpy(set->info_buf, &new_filter, sizeof(u32));
ret = rndis_filter_send_request(dev, request);
if (ret == 0) {
@@ -1114,6 +1261,11 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
+ new_sc->next_request_id_callback = vmbus_next_request_id;
+ new_sc->request_addr_callback = vmbus_request_addr;
+ new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
+ new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
@@ -1165,6 +1317,11 @@ int rndis_set_subchannel(struct net_device *ndev,
return -EIO;
}
+ /* Check that the number of allocated sub-channels is within the expected range */
+ if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
+ netdev_err(ndev, "invalid number of allocated sub channel\n");
+ return -EINVAL;
+ }
nvdev->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
@@ -1194,7 +1351,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_MAX_SIZE;
+ unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
/* Find HW offload capabilities */
@@ -1276,7 +1433,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_gso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
@@ -1420,7 +1577,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
for (i = 1; i < net_device->num_chn; i++)
netif_napi_add(net, &net_device->chan_table[i].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netvsc_poll);
return net_device;