Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/nicvf_main.c')
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nicvf_main.c   88
1 file changed, 33 insertions(+), 55 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 016957285f99..98f3dc460ca7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -126,8 +126,7 @@ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
 
 int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 {
-        int timeout = NIC_MBOX_MSG_TIMEOUT;
-        int sleep = 10;
+        unsigned long timeout;
         int ret = 0;
 
         mutex_lock(&nic->rx_mode_mtx);
@@ -137,6 +136,7 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 
         nicvf_write_to_mbx(nic, mbx);
 
+        timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
         /* Wait for previous message to be acked, timeout 2sec */
         while (!nic->pf_acked) {
                 if (nic->pf_nacked) {
@@ -146,11 +146,10 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
                         ret = -EINVAL;
                         break;
                 }
-                msleep(sleep);
+                usleep_range(8000, 10000);
                 if (nic->pf_acked)
                         break;
-                timeout -= sleep;
-                if (!timeout) {
+                if (time_after(jiffies, timeout)) {
                         netdev_err(nic->netdev,
                                    "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
                                    (mbx->msg.msg & 0xFF), nic->vf_id);
@@ -222,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
                 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
                 nic->node = mbx.nic_cfg.node_id;
                 if (!nic->set_mac_pending)
-                        ether_addr_copy(nic->netdev->dev_addr,
-                                        mbx.nic_cfg.mac_addr);
+                        eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
                 nic->sqs_mode = mbx.nic_cfg.sqs_mode;
                 nic->loopback_supported = mbx.nic_cfg.loopback_supported;
                 nic->link_up = false;
@@ -531,6 +529,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                                 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
                                 struct rcv_queue *rq, struct sk_buff **skb)
 {
+        unsigned char *hard_start, *data;
         struct xdp_buff xdp;
         struct page *page;
         u32 action;
@@ -548,16 +547,14 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
         cpu_addr = (u64)phys_to_virt(cpu_addr);
         page = virt_to_page((void *)cpu_addr);
 
-        xdp.data_hard_start = page_address(page);
-        xdp.data = (void *)cpu_addr;
-        xdp_set_data_meta_invalid(&xdp);
-        xdp.data_end = xdp.data + len;
-        xdp.rxq = &rq->xdp_rxq;
+        xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                      &rq->xdp_rxq);
+        hard_start = page_address(page);
+        data = (unsigned char *)cpu_addr;
+        xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
         orig_data = xdp.data;
 
-        rcu_read_lock();
         action = bpf_prog_run_xdp(prog, &xdp);
-        rcu_read_unlock();
 
         len = xdp.data_end - xdp.data;
         /* Check if XDP program has changed headers */
@@ -593,11 +590,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                 nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
                 return true;
         default:
-                bpf_warn_invalid_xdp_action(action);
-                /* fall through */
+                bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
+                fallthrough;
         case XDP_ABORTED:
                 trace_xdp_exception(nic->netdev, prog, action);
-                /* fall through */
+                fallthrough;
         case XDP_DROP:
                 /* Check if it's a recycled page, if not
                  * unmap the DMA mapping.
@@ -985,9 +982,9 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
  *
  * As of now only CQ errors are handled
  */
-static void nicvf_handle_qs_err(unsigned long data)
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
 {
-        struct nicvf *nic = (struct nicvf *)data;
+        struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
         struct queue_set *qs = nic->qs;
         int qidx;
         u64 status;
@@ -1226,7 +1223,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
         if (ret < 0) {
                 netdev_err(nic->netdev,
                            "Req for #%d msix vectors failed\n", nic->num_vec);
-                return 1;
+                return ret;
         }
 
         sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1245,7 +1242,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
         if (!nicvf_check_pf_ready(nic)) {
                 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
                 nicvf_unregister_interrupts(nic);
-                return 1;
+                return -EIO;
         }
 
         return 0;
@@ -1475,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
                 }
                 cq_poll->cq_idx = qidx;
                 cq_poll->nicvf = nic;
-                netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
-                               NAPI_POLL_WEIGHT);
+                netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
                 napi_enable(&cq_poll->napi);
                 nic->napi[qidx] = cq_poll;
         }
@@ -1493,12 +1489,10 @@ int nicvf_open(struct net_device *netdev)
         }
 
         /* Init tasklet for handling Qset err interrupt */
-        tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
-                     (unsigned long)nic);
+        tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
 
         /* Init RBDR tasklet which will refill RBDR */
-        tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
-                     (unsigned long)nic);
+        tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
         INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
 
         /* Configure CPI alorithm */
@@ -1616,7 +1610,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
         if (!is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;
 
-        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+        eth_hw_addr_set(netdev, addr->sa_data);
 
         if (nic->pdev->msix_enabled) {
                 if (nicvf_hw_set_mac_addr(nic, netdev))
@@ -1906,9 +1900,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
         switch (xdp->command) {
         case XDP_SETUP_PROG:
                 return nicvf_xdp_setup(nic, xdp->prog);
-        case XDP_QUERY_PROG:
-                xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
-                return 0;
         default:
                 return -EINVAL;
         }
@@ -1925,10 +1916,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                 return -EFAULT;
 
-        /* reserved for future extensions */
-        if (config.flags)
-                return -EINVAL;
-
         switch (config.tx_type) {
         case HWTSTAMP_TX_OFF:
         case HWTSTAMP_TX_ON:
@@ -2036,17 +2023,14 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
         u8 mode;
         struct xcast_addr_list *mc;
 
-        if (!vf_work)
-                return;
-
         /* Save message data locally to prevent them from
          * being overwritten by next ndo_set_rx_mode call().
          */
-        spin_lock(&nic->rx_mode_wq_lock);
+        spin_lock_bh(&nic->rx_mode_wq_lock);
         mode = vf_work->mode;
         mc = vf_work->mc;
         vf_work->mc = NULL;
-        spin_unlock(&nic->rx_mode_wq_lock);
+        spin_unlock_bh(&nic->rx_mode_wq_lock);
 
         __nicvf_set_rx_mode_task(mode, mc, nic);
 }
@@ -2070,8 +2054,8 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
                         mode |= BGX_XCAST_MCAST_FILTER;
                         /* here we need to copy mc addrs */
                         if (netdev_mc_count(netdev)) {
-                                mc_list = kmalloc(offsetof(typeof(*mc_list),
-                                                           mc[netdev_mc_count(netdev)]),
+                                mc_list = kmalloc(struct_size(mc_list, mc,
+                                                              netdev_mc_count(netdev)),
                                                   GFP_ATOMIC);
                                 if (unlikely(!mc_list))
                                         return;
@@ -2103,7 +2087,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
         .ndo_fix_features       = nicvf_fix_features,
         .ndo_set_features       = nicvf_set_features,
         .ndo_bpf                = nicvf_xdp,
-        .ndo_do_ioctl           = nicvf_ioctl,
+        .ndo_eth_ioctl          = nicvf_ioctl,
         .ndo_set_rx_mode        = nicvf_set_rx_mode,
 };
 
@@ -2126,10 +2110,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         }
 
         err = pci_enable_device(pdev);
-        if (err) {
-                dev_err(dev, "Failed to enable PCI device\n");
-                return err;
-        }
+        if (err)
+                return dev_err_probe(dev, err, "Failed to enable PCI device\n");
 
         err = pci_request_regions(pdev, DRV_NAME);
         if (err) {
@@ -2137,18 +2119,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 goto err_disable_device;
         }
 
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
         if (err) {
                 dev_err(dev, "Unable to get usable DMA configuration\n");
                 goto err_release_regions;
         }
 
-        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
-        if (err) {
-                dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
-                goto err_release_regions;
-        }
-
         qcount = netif_get_num_default_rss_queues();
 
         /* Restrict multiqset support only for host bound VFs */
@@ -2180,6 +2156,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 nic->max_queues *= 2;
         nic->ptp_clock = ptp_clock;
 
+        /* Initialize mutex that serializes usage of VF's mailbox */
+        mutex_init(&nic->rx_mode_mtx);
+
         /* MAP VF's configuration registers */
         nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
         if (!nic->reg_base) {
@@ -2256,7 +2235,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
         spin_lock_init(&nic->rx_mode_wq_lock);
-        mutex_init(&nic->rx_mode_mtx);
 
         err = register_netdev(netdev);
         if (err) {
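
The first hunk replaces the decrementing sleep counter in nicvf_send_msg_to_pf() with an absolute deadline: the timeout is computed once as jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT), and each iteration checks time_after(jiffies, timeout), so time spent inside usleep_range() or elsewhere is never under-counted. Below is a minimal userspace sketch of the same deadline pattern; the poll_for_ack() helper, the acked flag and the 100 ms budget are illustrative stand-ins, not driver code.

/*
 * Userspace analogue of the deadline-based wait adopted in the first
 * hunk: compute an absolute deadline once, then compare against the
 * clock on every iteration instead of decrementing a sleep counter.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll 'acked' until it becomes true or 'budget_ms' elapses. */
static int poll_for_ack(volatile bool *acked, long long budget_ms)
{
        long long deadline = now_ms() + budget_ms;   /* like jiffies + msecs_to_jiffies() */
        struct timespec nap = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

        while (!*acked) {
                nanosleep(&nap, NULL);               /* like usleep_range(8000, 10000) */
                if (*acked)
                        break;
                if (now_ms() > deadline)             /* like time_after(jiffies, timeout) */
                        return -1;                   /* timed out */
        }
        return 0;
}

int main(void)
{
        volatile bool acked = false;

        if (poll_for_ack(&acked, 100))
                puts("no ACK within budget");
        return 0;
}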
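
The tasklet conversion swaps tasklet_init() plus an unsigned long cookie for tasklet_setup(), and the callback recovers its nicvf with from_tasklet(nic, t, qs_err_task), which is essentially container_of() on the embedded tasklet member. A small standalone sketch of that recovery follows; struct fake_tasklet and struct fake_nicvf are invented stand-ins, not the kernel types.

/*
 * Minimal sketch of the container_of() idea behind
 * from_tasklet(nic, t, qs_err_task): given a pointer to the embedded
 * member, recover the enclosing structure.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_tasklet {
        void (*callback)(struct fake_tasklet *t);
};

struct fake_nicvf {
        int vf_id;
        struct fake_tasklet qs_err_task;        /* embedded member */
};

/* New-style callback: receives the tasklet, derives the owning device. */
static void handle_qs_err(struct fake_tasklet *t)
{
        struct fake_nicvf *nic = container_of(t, struct fake_nicvf, qs_err_task);

        printf("qs error on VF%d\n", nic->vf_id);
}

int main(void)
{
        struct fake_nicvf nic = { .vf_id = 3 };

        nic.qs_err_task.callback = handle_qs_err;
        nic.qs_err_task.callback(&nic.qs_err_task);     /* prints "qs error on VF3" */
        return 0;
}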
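
In the rx-mode hunk the open-coded offsetof(typeof(*mc_list), mc[netdev_mc_count(netdev)]) sizing is replaced by struct_size() for the flexible-array allocation. The sketch below shows the sizing idea with a local macro; unlike the kernel helper, this illustrative version does not saturate on multiplication overflow, and struct mc_addr_list is a made-up layout, not the driver's xcast_addr_list.

/*
 * Sketch of what the struct_size() conversion expresses: sizing an
 * allocation for a struct that ends in a flexible array member.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mc_addr_list {
        int count;
        unsigned char mc[][6];          /* flexible array of MAC addresses */
};

/* Illustration only: the kernel macro also guards against overflow. */
#define struct_size(p, member, n) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
        size_t n = 4;
        struct mc_addr_list *list = malloc(struct_size(list, mc, n));

        if (!list)
                return 1;
        list->count = (int)n;
        memset(list->mc, 0, sizeof(list->mc[0]) * n);
        printf("allocated %zu bytes for %zu addresses\n",
               struct_size(list, mc, n), n);
        free(list);
        return 0;
}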