-rw-r--r--	drivers/net/virtio_net.c	40
1 files changed, 36 insertions, 4 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e578885c1093..7bd63a677123 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2823,13 +2823,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
 
 static void virtnet_napi_enable(struct receive_queue *rq)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	int qidx = vq2rxq(rq->vq);
+
 	virtnet_napi_do_enable(rq->vq, &rq->napi);
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
 }
 
 static void virtnet_napi_tx_enable(struct send_queue *sq)
 {
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	struct napi_struct *napi = &sq->napi;
+	int qidx = vq2txq(sq->vq);
 
 	if (!napi->weight)
 		return;
@@ -2843,20 +2848,28 @@ static void virtnet_napi_tx_enable(struct send_queue *sq)
 	}
 
 	virtnet_napi_do_enable(sq->vq, napi);
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
 }
 
 static void virtnet_napi_tx_disable(struct send_queue *sq)
 {
+	struct virtnet_info *vi = sq->vq->vdev->priv;
 	struct napi_struct *napi = &sq->napi;
+	int qidx = vq2txq(sq->vq);
 
-	if (napi->weight)
+	if (napi->weight) {
+		netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
 		napi_disable(napi);
+	}
 }
 
 static void virtnet_napi_disable(struct receive_queue *rq)
 {
+	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct napi_struct *napi = &rq->napi;
+	int qidx = vq2rxq(rq->vq);
 
+	netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
 	napi_disable(napi);
 }
 
@@ -2870,9 +2883,23 @@ static void refill_work(struct work_struct *work)
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
-		virtnet_napi_disable(rq);
+		/*
+		 * When queue API support is added in the future and the call
+		 * below becomes napi_disable_locked, this driver will need to
+		 * be refactored.
+		 *
+		 * One possible solution would be to:
+		 *   - cancel refill_work with cancel_delayed_work (note:
+		 *     non-sync)
+		 *   - cancel refill_work with cancel_delayed_work_sync in
+		 *     virtnet_remove after the netdev is unregistered
+		 *   - wrap all of the work in a lock (perhaps the netdev
+		 *     instance lock)
+		 *   - check netif_running() and return early to avoid a race
+		 */
+		napi_disable(&rq->napi);
 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-		virtnet_napi_enable(rq);
+		virtnet_napi_do_enable(rq->vq, &rq->napi);
 
 		/* In theory, this can happen: if we don't get any buffers in
 		 * we will *never* try to fill again.
@@ -5650,8 +5677,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 	netif_tx_lock_bh(vi->dev);
 	netif_device_detach(vi->dev);
 	netif_tx_unlock_bh(vi->dev);
-	if (netif_running(vi->dev))
+	if (netif_running(vi->dev)) {
+		rtnl_lock();
 		virtnet_close(vi->dev);
+		rtnl_unlock();
+	}
 }
 
 static int init_vqs(struct virtnet_info *vi);
@@ -5671,7 +5701,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 	enable_rx_mode_work(vi);
 
 	if (netif_running(vi->dev)) {
+		rtnl_lock();
		err = virtnet_open(vi->dev);
+		rtnl_unlock();
 		if (err)
 			return err;
 	}
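
The recurring pattern in this change is: publish the queue-to-NAPI mapping with netif_queue_set_napi() only after the NAPI instance is enabled, and clear it (pass NULL) before napi_disable(). netif_queue_set_napi() expects to be called with rtnl_lock held (or, on newer kernels, the per-netdev instance lock), which appears to be why the freeze/restore paths now wrap virtnet_close()/virtnet_open() in rtnl_lock()/rtnl_unlock(), and why refill_work() open-codes napi_disable()/virtnet_napi_do_enable() instead of using the wrappers, so the mapping is never touched from workqueue context where neither lock is held. Below is a minimal sketch of that enable/disable ordering for a hypothetical driver; the foo_* names and foo_rx_queue struct are illustrative and not part of virtio_net:

#include <linux/netdevice.h>

struct foo_rx_queue {
	struct napi_struct napi;
	int index;		/* rx queue index, as reported to the stack */
};

/* Caller is expected to hold rtnl_lock (or the netdev instance lock). */
static void foo_rx_napi_enable(struct net_device *dev, struct foo_rx_queue *rq)
{
	napi_enable(&rq->napi);
	/* expose the queue<->NAPI mapping only once the NAPI is live */
	netif_queue_set_napi(dev, rq->index, NETDEV_QUEUE_TYPE_RX, &rq->napi);
}

/* Caller is expected to hold rtnl_lock (or the netdev instance lock). */
static void foo_rx_napi_disable(struct net_device *dev, struct foo_rx_queue *rq)
{
	/* hide the mapping before the NAPI instance goes away */
	netif_queue_set_napi(dev, rq->index, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(&rq->napi);
}

With the mapping kept in sync this way, userspace can query which NAPI instance serves each queue through the netdev netlink queue API without ever seeing a NAPI that is being torn down.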