Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  |  856
1 file changed, 746 insertions(+), 110 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 55db6a336f7e..7106932c6f88 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@ struct send_queue {
struct virtnet_sq_stats stats;
struct napi_struct napi;
+
+ /* Record whether sq is in reset state. */
+ bool reset;
};
/* Internal representation of a receive virtqueue */
@@ -169,6 +172,24 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
};
+/* This structure can contain the RSS message with maximum settings for the
+ * indirection table and key size. Note that the default structure describing
+ * the RSS configuration, virtio_net_rss_config, contains the same info but
+ * can't hold the table values. In any case, the structure is passed to the
+ * virtio device through sg_buf split into parts, because the table sizes may
+ * differ according to the device configuration.
+ */
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
+struct virtio_net_ctrl_rss {
+ u32 hash_types;
+ u16 indirection_table_mask;
+ u16 unclassified_queue;
+ u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
+ u16 max_tx_vq;
+ u8 hash_key_length;
+ u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+};
+
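As an aside for readers of this hunk: the structure above is deliberately sized for the maximum table and key, but only the device-advertised portions go on the wire. A minimal sketch (not part of the patch; the helper name is made up) of how many bytes actually reach the device, mirroring the four sg segments that virtnet_commit_rss_command() builds later in this patch:

static size_t virtnet_rss_msg_len(u16 indir_table_size, u8 key_size)
{
	/* fixed head: hash_types, indirection_table_mask, unclassified_queue */
	return offsetof(struct virtio_net_ctrl_rss, indirection_table) +
	       /* only the advertised number of 16-bit table entries */
	       sizeof(u16) * indir_table_size +
	       /* max_tx_vq + hash_key_length */
	       (offsetof(struct virtio_net_ctrl_rss, key) -
		offsetof(struct virtio_net_ctrl_rss, max_tx_vq)) +
	       /* only the advertised key bytes */
	       key_size;
}

For a device advertising a 128-entry table and a 40-byte key this comes to 8 + 256 + 3 + 40 = 307 bytes, sent as four scatterlist parts rather than as one maximum-sized blob.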
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
@@ -178,6 +199,7 @@ struct control_buf {
u8 allmulti;
__virtio16 vid;
__virtio64 offloads;
+ struct virtio_net_ctrl_rss rss;
};
struct virtnet_info {
@@ -203,9 +225,20 @@ struct virtnet_info {
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* number of sg entries allocated for big packets */
+ unsigned int big_packets_num_skbfrags;
+
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
+ /* Host supports rss and/or hash report */
+ bool has_rss;
+ bool has_rss_hash_report;
+ u8 rss_key_size;
+ u16 rss_indir_table_size;
+ u32 rss_hash_types_supported;
+ u32 rss_hash_types_saved;
+
/* Has control virtqueue */
bool has_cvq;
@@ -215,9 +248,15 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for refilling if we run low on memory. */
+ /* Work struct for delayed refilling if we run low on memory. */
struct delayed_work refill;
+ /* Is delayed refill enabled? */
+ bool refill_enabled;
+
+ /* The lock to synchronize the access to refill_enabled */
+ spinlock_t refill_lock;
+
/* Work struct for config space updates */
struct work_struct config_work;
@@ -234,6 +273,12 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Interrupt coalescing settings */
+ u32 tx_usecs;
+ u32 rx_usecs;
+ u32 tx_max_packets;
+ u32 rx_max_packets;
+
unsigned long guest_offloads;
unsigned long guest_offloads_capable;
@@ -242,15 +287,18 @@ struct virtnet_info {
};
struct padded_vnet_hdr {
- struct virtio_net_hdr_mrg_rxbuf hdr;
+ struct virtio_net_hdr_v1_hash hdr;
/*
* hdr is in a separate sg buffer, and data sg buffer shares same page
* with this header sg. This padding makes next sg 16 byte aligned
* after the header.
*/
- char padding[4];
+ char padding[12];
};
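A quick sizing check of the new padding (illustrative, not part of the patch; the header sizes assume the uapi layouts of the two structs):

/*
 * sizeof(struct virtio_net_hdr_mrg_rxbuf) = 12  ->  12 +  4 = 16
 * sizeof(struct virtio_net_hdr_v1_hash)   = 20  ->  20 + 12 = 32
 *
 * Both totals are multiples of 16, so the data sg that follows the
 * header sg stays 16-byte aligned, as the comment above requires.
 */
static_assert(sizeof(struct padded_vnet_hdr) % 16 == 0);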
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+
static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
@@ -321,6 +369,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = true;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+ spin_lock_bh(&vi->refill_lock);
+ vi->refill_enabled = false;
+ spin_unlock_bh(&vi->refill_lock);
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -396,7 +458,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
hdr_len = vi->hdr_len;
if (vi->mergeable_rx_bufs)
- hdr_padded_len = sizeof(*hdr);
+ hdr_padded_len = hdr_len;
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
@@ -733,7 +795,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
pr_debug("%s: rx error: len %u exceeds max size %d\n",
dev->name, len, GOOD_PACKET_LEN);
dev->stats.rx_length_errors++;
- goto err_len;
+ goto err;
}
if (likely(!vi->xdp_enabled)) {
@@ -812,7 +874,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
@@ -825,10 +887,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
skip_xdp:
skb = build_skb(buf, buflen);
- if (!skb) {
- put_page(page);
+ if (!skb)
goto err;
- }
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
if (!xdp_prog) {
@@ -839,13 +899,12 @@ skip_xdp:
if (metasize)
skb_metadata_set(skb, metasize);
-err:
return skb;
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
-err_len:
+err:
stats->drops++;
put_page(page);
xdp_xmit:
@@ -981,6 +1040,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* xdp.data_meta were adjusted
*/
len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+ /* recalculate headroom if xdp.data or xdp.data_meta
+ * were adjusted, note that offset should always point
+ * to the start of the reserved bytes for virtio_net
+ * header which are followed by xdp.data, that means
+ * that offset is equal to the headroom (when buf is
+ * starting at the beginning of the page, otherwise
+ * there is a base offset inside the page) but it's used
+ * with a different starting point (buf start) than
+ * xdp.data (buf start + vnet hdr size). If xdp.data or
+ * data_meta were adjusted by the xdp prog then the
+ * headroom size has changed and so has the offset, we
+ * can use data_hard_start, which points at buf start +
+ * vnet hdr size, to calculate the new headroom and use
+ * it later to compute buf start in page_to_skb()
+ */
+ headroom = xdp.data - xdp.data_hard_start - metasize;
+
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
@@ -988,15 +1065,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
metasize,
- VIRTIO_XDP_HEADROOM);
+ headroom);
return head_skb;
}
break;
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
goto err_xdp;
+ }
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
@@ -1025,7 +1105,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
goto xdp_xmit;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(vi->dev, xdp_prog, act);
@@ -1126,6 +1206,35 @@ xdp_xmit:
return NULL;
}
+static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
+ struct sk_buff *skb)
+{
+ enum pkt_hash_types rss_hash_type;
+
+ if (!hdr_hash || !skb)
+ return;
+
+ switch (__le16_to_cpu(hdr_hash->hash_report)) {
+ case VIRTIO_NET_HASH_REPORT_TCPv4:
+ case VIRTIO_NET_HASH_REPORT_UDPv4:
+ case VIRTIO_NET_HASH_REPORT_TCPv6:
+ case VIRTIO_NET_HASH_REPORT_UDPv6:
+ case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
+ case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
+ rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ case VIRTIO_NET_HASH_REPORT_IPv4:
+ case VIRTIO_NET_HASH_REPORT_IPv6:
+ case VIRTIO_NET_HASH_REPORT_IPv6_EX:
+ rss_hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VIRTIO_NET_HASH_REPORT_NONE:
+ default:
+ rss_hash_type = PKT_HASH_TYPE_NONE;
+ }
+ skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
+}
+
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
void *buf, unsigned int len, void **ctx,
unsigned int *xdp_xmit,
@@ -1160,6 +1269,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
hdr = skb_vnet_hdr(skb);
+ if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+ virtio_skb_set_hash((const struct virtio_net_hdr_v1_hash *)hdr, skb);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1223,10 +1334,10 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
char *p;
int i, err, offset;
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
- /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
- for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
+ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
first = get_a_page(rq, gfp);
if (!first) {
if (list)
@@ -1257,7 +1368,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -1269,7 +1380,8 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
struct ewma_pkt_len *avg_pkt_len,
unsigned int room)
{
- const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ const size_t hdr_len = vi->hdr_len;
unsigned int len;
if (room)
@@ -1453,8 +1565,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
}
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+ spin_lock(&vi->refill_lock);
+ if (vi->refill_enabled)
+ schedule_delayed_work(&vi->refill, 0);
+ spin_unlock(&vi->refill_lock);
+ }
}
u64_stats_update_begin(&rq->stats.syncp);
@@ -1527,6 +1643,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
return;
if (__netif_tx_trylock(txq)) {
+ if (sq->reset) {
+ __netif_tx_unlock(txq);
+ return;
+ }
+
do {
virtqueue_disable_cb(sq->vq);
free_old_xmit_skbs(sq, true);
@@ -1577,6 +1698,8 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
+ enable_delayed_refill(vi);
+
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* Make sure we have some buffers: if oom use wq. */
@@ -1772,6 +1895,70 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (running)
+ napi_disable(&rq->napi);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
+ if (running)
+ virtnet_napi_enable(rq->vq, &rq->napi);
+ return err;
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi,
+ struct send_queue *sq, u32 ring_num)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int err, qindex;
+
+ qindex = sq - vi->sq;
+
+ if (running)
+ virtnet_napi_tx_disable(&sq->napi);
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
+
+ /* 1. wait for all xmit to complete
+ * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+ */
+ __netif_tx_lock_bh(txq);
+
+ /* Prevent rx poll from accessing sq. */
+ sq->reset = true;
+
+ /* Prevent the upper layer from trying to send packets. */
+ netif_stop_subqueue(vi->dev, qindex);
+
+ __netif_tx_unlock_bh(txq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ __netif_tx_lock_bh(txq);
+ sq->reset = false;
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock_bh(txq);
+
+ if (running)
+ virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ return err;
+}
+
/*
* Send command via the control virtqueue and check status. Commands
* supported by the hypervisor, as indicated by feature bits, should
@@ -1959,6 +2146,8 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
+ /* Make sure NAPI doesn't schedule refill work */
+ disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
@@ -2104,7 +2293,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_next(-1, cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
@@ -2174,16 +2363,233 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
}
static void virtnet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
- ring->rx_pending = ring->rx_max_pending;
- ring->tx_pending = ring->tx_max_pending;
+ ring->rx_max_pending = vi->rq[0].vq->num_max;
+ ring->tx_max_pending = vi->sq[0].vq->num_max;
+ ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}
+static int virtnet_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 rx_pending, tx_pending;
+ struct receive_queue *rq;
+ struct send_queue *sq;
+ int i, err;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+ tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+
+ if (ring->rx_pending == rx_pending &&
+ ring->tx_pending == tx_pending)
+ return 0;
+
+ if (ring->rx_pending > vi->rq[0].vq->num_max)
+ return -EINVAL;
+
+ if (ring->tx_pending > vi->sq[0].vq->num_max)
+ return -EINVAL;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ rq = vi->rq + i;
+ sq = vi->sq + i;
+
+ if (ring->tx_pending != tx_pending) {
+ err = virtnet_tx_resize(vi, sq, ring->tx_pending);
+ if (err)
+ return err;
+ }
+
+ if (ring->rx_pending != rx_pending) {
+ err = virtnet_rx_resize(vi, rq, ring->rx_pending);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static bool virtnet_commit_rss_command(struct virtnet_info *vi)
+{
+ struct net_device *dev = vi->dev;
+ struct scatterlist sgs[4];
+ unsigned int sg_buf_size;
+
+ /* prepare sgs */
+ sg_init_table(sgs, 4);
+
+ sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
+ sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+
+ sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+
+ sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
+ - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
+ sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+
+ sg_buf_size = vi->rss_key_size;
+ sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+ vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+ }
+ return true;
+}
+
+static void virtnet_init_default_rss(struct virtnet_info *vi)
+{
+ u32 indir_val = 0;
+ int i = 0;
+
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hash_types_saved = vi->rss_hash_types_supported;
+ vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ ? vi->rss_indir_table_size - 1 : 0;
+ vi->ctrl->rss.unclassified_queue = 0;
+
+ for (; i < vi->rss_indir_table_size; ++i) {
+ indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
+ vi->ctrl->rss.indirection_table[i] = indir_val;
+ }
+
+ vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
+ vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+ netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+}
+
+static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case TCP_V6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case UDP_V4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ }
+ break;
+ case IPV4_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ break;
+ case IPV6_FLOW:
+ if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+}
+
+static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+{
+ u32 new_hashtypes = vi->rss_hash_types_saved;
+ bool is_disable = info->data & RXH_DISCARD;
+ bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
+
+ /* supports only 'sd', 'sdfn' and 'r' */
+ if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
+ return false;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
+ break;
+ case UDP_V4_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
+ break;
+ case IPV4_FLOW:
+ new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+ if (!is_disable)
+ new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
+ break;
+ case TCP_V6_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
+ break;
+ case UDP_V6_FLOW:
+ new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
+ if (!is_disable)
+ new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
+ | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
+ break;
+ case IPV6_FLOW:
+ new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+ if (!is_disable)
+ new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+ break;
+ default:
+ /* unsupported flow */
+ return false;
+ }
+
+ /* if unsupported hashtype was set */
+ if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
+ return false;
+
+ if (new_hashtypes != vi->rss_hash_types_saved) {
+ vi->rss_hash_types_saved = new_hashtypes;
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ if (vi->dev->features & NETIF_F_RXHASH)
+ return virtnet_commit_rss_command(vi);
+ }
+
+ return true;
+}
static void virtnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -2191,9 +2597,9 @@ static void virtnet_get_drvinfo(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
@@ -2343,27 +2749,89 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ struct scatterlist sgs_tx, sgs_rx;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
+
+ coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
+ &sgs_tx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->tx_usecs = ec->tx_coalesce_usecs;
+ vi->tx_max_packets = ec->tx_max_coalesced_frames;
+
+ coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
+ &sgs_rx))
+ return -EINVAL;
+
+ /* Save parameters */
+ vi->rx_usecs = ec->rx_coalesce_usecs;
+ vi->rx_max_packets = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
+{
+ /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
+ * feature is negotiated.
+ */
+ if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
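To make the legacy constraint above concrete, here is an illustrative set of parameters (not from the patch) that still passes when VIRTIO_NET_F_NOTF_COAL is not negotiated:

static const struct ethtool_coalesce legacy_coal_ok = {
	.rx_max_coalesced_frames = 1,	/* must stay 1 */
	.tx_max_coalesced_frames = 1,	/* 1 keeps TX NAPI enabled, 0 disables it */
	/* any non-zero rx/tx_coalesce_usecs is rejected with -EOPNOTSUPP */
};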
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
- int i, napi_weight;
-
- if (ec->tx_max_coalesced_frames > 1 ||
- ec->rx_max_coalesced_frames != 1)
- return -EINVAL;
+ int ret, i, napi_weight;
+ bool update_napi = false;
+ /* Can't change NAPI weight if the link is up */
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
+ else
+ update_napi = true;
+ }
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
+ ret = virtnet_send_notf_coal_cmds(vi, ec);
+ else
+ ret = virtnet_coal_params_supported(ec);
+
+ if (ret)
+ return ret;
+
+ if (update_napi) {
for (i = 0; i < vi->max_queue_pairs; i++)
vi->sq[i].napi.weight = napi_weight;
}
- return 0;
+ return ret;
}
static int virtnet_get_coalesce(struct net_device *dev,
@@ -2371,16 +2839,19 @@ static int virtnet_get_coalesce(struct net_device *dev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_GCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
- memcpy(ec, &ec_default, sizeof(ec_default));
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ ec->rx_coalesce_usecs = vi->rx_usecs;
+ ec->tx_coalesce_usecs = vi->tx_usecs;
+ ec->tx_max_coalesced_frames = vi->tx_max_packets;
+ ec->rx_max_coalesced_frames = vi->rx_max_packets;
+ } else {
+ ec->rx_max_coalesced_frames = 1;
- if (vi->sq[0].napi.weight)
- ec->tx_max_coalesced_frames = 1;
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+ }
return 0;
}
@@ -2412,11 +2883,99 @@ static void virtnet_update_settings(struct virtnet_info *vi)
vi->duplex = duplex;
}
+static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
+{
+ return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
+}
+
+static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
+{
+ return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
+}
+
+static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ indir[i] = vi->ctrl->rss.indirection_table[i];
+ }
+
+ if (key)
+ memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < vi->rss_indir_table_size; ++i)
+ vi->ctrl->rss.indirection_table[i] = indir[i];
+ }
+ if (key)
+ memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+
+ virtnet_commit_rss_command(vi);
+
+ return 0;
+}
+
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = vi->curr_queue_pairs;
+ break;
+ case ETHTOOL_GRXFH:
+ virtnet_get_hashflow(vi, info);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ if (!virtnet_set_hashflow(vi, info))
+ rc = -EINVAL;
+
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
+ .set_ringparam = virtnet_set_ringparam,
.get_strings = virtnet_get_strings,
.get_sset_count = virtnet_get_sset_count,
.get_ethtool_stats = virtnet_get_ethtool_stats,
@@ -2427,12 +2986,17 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_link_ksettings = virtnet_set_link_ksettings,
.set_coalesce = virtnet_set_coalesce,
.get_coalesce = virtnet_get_coalesce,
+ .get_rxfh_key_size = virtnet_get_rxfh_key_size,
+ .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
+ .get_rxfh = virtnet_get_rxfh,
+ .set_rxfh = virtnet_set_rxfh,
+ .get_rxnfc = virtnet_get_rxnfc,
+ .set_rxnfc = virtnet_set_rxnfc,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -2440,14 +3004,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
- }
+ if (netif_running(vi->dev))
+ virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -2455,7 +3013,7 @@ static int init_vqs(struct virtnet_info *vi);
static int virtnet_restore_up(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
err = init_vqs(vi);
if (err)
@@ -2463,16 +3021,12 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ enable_delayed_refill(vi);
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(vi->dev)) {
+ err = virtnet_open(vi->dev);
+ if (err)
+ return err;
}
netif_tx_lock_bh(vi->dev);
@@ -2679,6 +3233,16 @@ static int virtnet_set_features(struct net_device *dev,
vi->guest_offloads = offloads;
}
+ if ((dev->features ^ features) & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH)
+ vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ else
+ vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+
+ if (!virtnet_commit_rss_command(vi))
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2694,7 +3258,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
txqueue, sq->name, sq->vq->index, sq->vq->name,
- jiffies_to_usecs(jiffies - txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
static const struct net_device_ops virtnet_netdev = {
@@ -2805,6 +3369,27 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ if (!is_xdp_frame(buf))
+ dev_kfree_skb(buf);
+ else
+ xdp_return_frame(ptr_to_xdp(buf));
+}
+
+static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int i = vq2rxq(vq);
+
+ if (vi->mergeable_rx_bufs)
+ put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
+ else
+ put_page(virt_to_head_page(buf));
+}
+
static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2812,26 +3397,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_frame(buf))
- dev_kfree_skb(buf);
- else
- xdp_return_frame(ptr_to_xdp(buf));
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_sq_free_unused_buf(vq, buf);
}
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->rq[i].vq;
-
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(&vi->rq[i], buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
- }
+ while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ virtnet_rq_free_unused_buf(vq, buf);
}
}
@@ -2852,7 +3425,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
*/
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
- const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ const unsigned int hdr_len = vi->hdr_len;
unsigned int rq_size = virtqueue_get_vring_size(vq);
unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
@@ -2967,10 +3540,11 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
- netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
- napi_tx ? napi_weight : 0);
+ netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ napi_weight);
+ netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
+ virtnet_poll_tx,
+ napi_tx ? napi_weight : 0);
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
@@ -3073,6 +3647,12 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
"VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
+ "VIRTIO_NET_F_CTRL_VQ") ||
+ VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
"VIRTIO_NET_F_CTRL_VQ"))) {
return false;
}
@@ -3105,21 +3685,44 @@ static int virtnet_validate(struct virtio_device *vdev)
return 0;
}
+static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
+{
+ return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
+}
+
+static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
+{
+ bool guest_gso = virtnet_check_guest_gso(vi);
+
+ /* If the device can receive ANY guest GSO packets, regardless of mtu,
+ * allocate packets of maximum size; otherwise limit them to only
+ * mtu size worth.
+ */
+ if (mtu > ETH_DATA_LEN || guest_gso) {
+ vi->big_packets = true;
+ vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
+ }
+}
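A worked example of the sizing above (illustrative only; the helper name is made up and 4 KiB pages are assumed):

static unsigned int big_packet_frags_for_mtu(int mtu)
{
	/* No guest GSO feature: just enough page-sized sg entries to cover
	 * the mtu, e.g. mtu 9000 -> DIV_ROUND_UP(9000, 4096) = 3, instead
	 * of the full MAX_SKB_FRAGS used when GSO is possible.
	 */
	return DIV_ROUND_UP(mtu, PAGE_SIZE);
}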
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
- int mtu;
+ int mtu = 0;
- /* Find if host supports multiqueue virtio_net device */
- err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
- struct virtio_net_config,
- max_virtqueue_pairs, &max_queue_pairs);
+ /* Find if host supports multiqueue/rss virtio_net device */
+ max_queue_pairs = 1;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ max_queue_pairs =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
/* We need at least 2 queue's */
- if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
+ if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
max_queue_pairs = 1;
@@ -3196,19 +3799,45 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
-
- /* If we can receive ANY GSO packets, we must allocate large ones. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
- vi->big_packets = true;
+ spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
- virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
+ vi->rx_usecs = 0;
+ vi->tx_usecs = 0;
+ vi->tx_max_packets = 0;
+ vi->rx_max_packets = 0;
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
+ vi->has_rss_hash_report = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ vi->has_rss = true;
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
+ vi->rss_indir_table_size =
+ virtio_cread16(vdev, offsetof(struct virtio_net_config,
+ rss_max_indirection_table_length));
+ vi->rss_key_size =
+ virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
+
+ vi->rss_hash_types_supported =
+ virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
+ vi->rss_hash_types_supported &=
+ ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
+ VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+ VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
+
+ dev->hw_features |= NETIF_F_RXHASH;
+ }
+
+ if (vi->has_rss_hash_report)
+ vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
+ else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
vi->hdr_len = sizeof(struct virtio_net_hdr);
@@ -3237,12 +3866,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->mtu = mtu;
dev->max_mtu = mtu;
-
- /* TODO: size buffers correctly in this case. */
- if (dev->mtu > ETH_DATA_LEN)
- vi->big_packets = true;
}
+ virtnet_set_big_packets(vi, mtu);
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
@@ -3275,14 +3902,23 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
- err = register_netdev(dev);
+ if (vi->has_rss || vi->has_rss_hash_report)
+ virtnet_init_default_rss(vi);
+
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+ err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
+ rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
+ rtnl_unlock();
+
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
@@ -3313,7 +3949,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
- vi->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
@@ -3329,7 +3965,7 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
- vi->vdev->config->reset(vi->vdev);
+ virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
@@ -3406,7 +4042,8 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
- VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
+ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
+ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL
static unsigned int features[] = {
VIRTNET_FEATURES,
@@ -3450,8 +4087,7 @@ static __init int virtio_net_driver_init(void)
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
-
- ret = register_virtio_driver(&virtio_net_driver);
+ ret = register_virtio_driver(&virtio_net_driver);
if (ret)
goto err_virtio;
return 0;