Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
-rw-r--r--	drivers/net/ethernet/cisco/enic/enic_main.c	283
1 file changed, 124 insertions(+), 159 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ddf60dc9ad16..29500d32e362 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -80,7 +80,6 @@ static const struct pci_device_id enic_id_table[] = {
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
 #define ENIC_LARGE_PKT_THRESHOLD		1000
@@ -151,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
 		    !cpumask_available(enic->msix[i].affinity_mask) ||
 		    cpumask_empty(enic->msix[i].affinity_mask))
 			continue;
-		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
-					    enic->msix[i].affinity_mask);
+		err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+					       enic->msix[i].affinity_mask);
 		if (err)
-			netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+			netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
 				    err);
 	}
 
@@ -174,53 +173,21 @@ static void enic_unset_affinity_hint(struct enic *enic)
 	int i;
 
 	for (i = 0; i < enic->intr_count; i++)
-		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+		irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 
-static void enic_udp_tunnel_add(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_set_port(struct net_device *netdev,
+				    unsigned int table, unsigned int entry,
+				    struct udp_tunnel_info *ti)
 {
 	struct enic *enic = netdev_priv(netdev);
-	__be16 port = ti->port;
 	int err;
 
 	spin_lock_bh(&enic->devcmd_lock);
 
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-		netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
-		goto error;
-	}
-
-	switch (ti->sa_family) {
-	case AF_INET6:
-		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
-			netdev_info(netdev, "vxlan: only IPv4 offload supported");
-			goto error;
-		}
-		/* Fall through */
-	case AF_INET:
-		break;
-	default:
-		goto error;
-	}
-
-	if (enic->vxlan.vxlan_udp_port_number) {
-		if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
-			netdev_warn(netdev, "vxlan: udp port already offloaded");
-		else
-			netdev_info(netdev, "vxlan: offload supported for only one UDP port");
-
-		goto error;
-	}
-	if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
-	    !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
-		netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
-		goto error;
-	}
-
 	err = vnic_dev_overlay_offload_cfg(enic->vdev,
 					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
-					   ntohs(port));
+					   ntohs(ti->port));
 	if (err)
 		goto error;
 
@@ -229,52 +196,50 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
 	if (err)
 		goto error;
 
-	enic->vxlan.vxlan_udp_port_number = ntohs(port);
-
-	netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
-		    (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
-
-	goto unlock;
-
+	enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
 error:
-	netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
-		    ntohs(port), ti->sa_family, ti->type);
-unlock:
 	spin_unlock_bh(&enic->devcmd_lock);
+
+	return err;
 }
 
-static void enic_udp_tunnel_del(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_unset_port(struct net_device *netdev,
+				      unsigned int table, unsigned int entry,
+				      struct udp_tunnel_info *ti)
 {
 	struct enic *enic = netdev_priv(netdev);
 	int err;
 
 	spin_lock_bh(&enic->devcmd_lock);
 
-	if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
-	    ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-		netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
-			    ntohs(ti->port), ti->sa_family, ti->type);
-		goto unlock;
-	}
-
 	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
 					    OVERLAY_OFFLOAD_DISABLE);
-	if (err) {
-		netdev_err(netdev, "vxlan: del offload udp port: %d failed",
-			   ntohs(ti->port));
+	if (err)
 		goto unlock;
-	}
 
 	enic->vxlan.vxlan_udp_port_number = 0;
 
-	netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
-		    ntohs(ti->port), ti->sa_family);
-
 unlock:
 	spin_unlock_bh(&enic->devcmd_lock);
+
+	return err;
 }
 
+static const struct udp_tunnel_nic_info enic_udp_tunnels = {
+	.set_port	= enic_udp_tunnel_set_port,
+	.unset_port	= enic_udp_tunnel_unset_port,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+}, enic_udp_tunnels_v4 = {
+	.set_port	= enic_udp_tunnel_set_port,
+	.unset_port	= enic_udp_tunnel_unset_port,
+	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+	.tables		= {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+};
+
 static netdev_features_t enic_features_check(struct sk_buff *skb,
 					     struct net_device *dev,
 					     netdev_features_t features)
@@ -307,7 +272,7 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
 	case ntohs(ETH_P_IPV6):
 		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
 			goto out;
-		/* Fall through */
+		fallthrough;
 	case ntohs(ETH_P_IP):
 		break;
 	default:
@@ -361,11 +326,11 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 	struct enic *enic = vnic_dev_priv(wq->vdev);
 
 	if (buf->sop)
-		pci_unmap_single(enic->pdev, buf->dma_addr,
-			buf->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_TO_DEVICE);
 	else
-		pci_unmap_page(enic->pdev, buf->dma_addr,
-			buf->len, PCI_DMA_TODEVICE);
+		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+			       DMA_TO_DEVICE);
 
 	if (buf->os_buf)
 		dev_kfree_skb_any(buf->os_buf);
@@ -609,8 +574,8 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -640,8 +605,8 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -696,8 +661,7 @@ static void enic_preload_tcp_csum(struct sk_buff *skb)
 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
-		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+		tcp_v6_gso_csum_prep(skb);
 	}
 }
 
@@ -716,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	skb_frag_t *frag;
 
 	if (skb->encapsulation) {
-		hdr_len = skb_inner_transport_header(skb) - skb->data;
-		hdr_len += inner_tcp_hdrlen(skb);
+		hdr_len = skb_inner_tcp_all_headers(skb);
 		enic_preload_tcp_csum_encap(skb);
 	} else {
-		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr_len = skb_tcp_all_headers(skb);
 		enic_preload_tcp_csum(skb);
 	}
 
@@ -729,8 +692,9 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
-				PCI_DMA_TODEVICE);
+		dma_addr = dma_map_single(&enic->pdev->dev,
+					  skb->data + offset, len,
+					  DMA_TO_DEVICE);
 		if (unlikely(enic_dma_map_check(enic, dma_addr)))
 			return -ENOMEM;
 		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
@@ -788,8 +752,8 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -803,7 +767,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
 	return err;
 }
 
-static inline void enic_queue_wq_skb(struct enic *enic,
+static inline int enic_queue_wq_skb(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb)
 {
 	unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -849,6 +813,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 		wq->to_use = buf->next;
 		dev_kfree_skb(skb);
 	}
+	return err;
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -892,7 +857,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	enic_queue_wq_skb(enic, wq, skb);
+	if (enic_queue_wq_skb(enic, wq, skb))
+		goto error;
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_tx_stop_queue(txq);
@@ -900,6 +866,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
 
+error:
 	spin_unlock(&enic->wq_lock[txq_map]);
 
 	return NETDEV_TX_OK;
@@ -914,7 +881,7 @@ static void enic_get_stats(struct net_device *netdev,
 	int err;
 
 	err = enic_dev_stats_dump(enic, &stats);
-	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
 	 * For other failures, like devcmd failure, we return previously
 	 * recorded stats.
 	 */
@@ -1017,7 +984,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 			return -EADDRNOTAVAIL;
 	}
 
-	memcpy(netdev->dev_addr, addr, netdev->addr_len);
+	eth_hw_addr_set(netdev, addr);
 
 	return 0;
 }
@@ -1130,6 +1097,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 static int enic_set_vf_port(struct net_device *netdev, int vf,
 	struct nlattr *port[])
 {
+	static const u8 zero_addr[ETH_ALEN] = {};
 	struct enic *enic = netdev_priv(netdev);
 	struct enic_port_profile prev_pp;
 	struct enic_port_profile *pp;
@@ -1194,7 +1162,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 		} else {
 			memset(pp, 0, sizeof(*pp));
 			if (vf == PORT_SELF_VF)
-				eth_zero_addr(netdev->dev_addr);
+				eth_hw_addr_set(netdev, zero_addr);
 		}
 	} else {
 		/* Set flag to indicate that the port assoc/disassoc
@@ -1206,7 +1174,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
 			eth_zero_addr(pp->mac_addr);
 			if (vf == PORT_SELF_VF)
-				eth_zero_addr(netdev->dev_addr);
+				eth_hw_addr_set(netdev, zero_addr);
 		}
 	}
 
@@ -1258,8 +1226,8 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	if (!buf->os_buf)
 		return;
 
-	pci_unmap_single(enic->pdev, buf->dma_addr,
-		buf->len, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 	dev_kfree_skb_any(buf->os_buf);
 	buf->os_buf = NULL;
 }
@@ -1284,8 +1252,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, len,
-				  PCI_DMA_FROMDEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+				  DMA_FROM_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
@@ -1317,8 +1285,8 @@ static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
 	new_skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!new_skb)
 		return false;
-	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
-				    DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+				DMA_FROM_DEVICE);
 	memcpy(new_skb->data, (*skb)->data, len);
 	*skb = new_skb;
 
@@ -1367,8 +1335,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 				enic->rq_truncated_pkts++;
 		}
 
-		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		buf->os_buf = NULL;
 
@@ -1382,8 +1350,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
 			buf->os_buf = NULL;
-			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+					 buf->len, DMA_FROM_DEVICE);
 		}
 
 		prefetch(skb->data - NET_IP_ALIGN);
@@ -1456,8 +1424,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		/* Buffer overflow
 		 */
 
-		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		buf->os_buf = NULL;
 	}
@@ -2142,8 +2110,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
 	int done;
 	int err;
 
-	BUG_ON(in_interrupt());
-
 	err = start(vdev, arg);
 	if (err)
 		return err;
@@ -2214,9 +2180,9 @@ int __enic_set_rsskey(struct enic *enic)
 	dma_addr_t rss_key_buf_pa;
 	int i, kidx, bidx, err;
 
-	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
-					       sizeof(union vnic_rss_key),
-					       &rss_key_buf_pa);
+	rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+					    sizeof(union vnic_rss_key),
+					    &rss_key_buf_pa, GFP_ATOMIC);
 	if (!rss_key_buf_va)
 		return -ENOMEM;
 
@@ -2231,8 +2197,8 @@ int __enic_set_rsskey(struct enic *enic)
 				sizeof(union vnic_rss_key));
 	spin_unlock_bh(&enic->devcmd_lock);
 
-	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
-			    rss_key_buf_va, rss_key_buf_pa);
+	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
+			  rss_key_buf_va, rss_key_buf_pa);
 
 	return err;
 }
@@ -2251,8 +2217,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 	unsigned int i;
 	int err;
 
-	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
-		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+	rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+					    sizeof(union vnic_rss_cpu),
+					    &rss_cpu_buf_pa, GFP_ATOMIC);
 	if (!rss_cpu_buf_va)
 		return -ENOMEM;
 
@@ -2265,8 +2232,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 			 sizeof(union vnic_rss_cpu));
 	spin_unlock_bh(&enic->devcmd_lock);
 
-	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
-		rss_cpu_buf_va, rss_cpu_buf_pa);
+	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
+			  rss_cpu_buf_va, rss_cpu_buf_pa);
 
 	return err;
 }
@@ -2331,6 +2298,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
 				rss_hash_bits, rss_base_cpu, rss_enable);
 }
 
+static void enic_set_api_busy(struct enic *enic, bool busy)
+{
+	spin_lock(&enic->enic_api_lock);
+	enic->enic_api_busy = busy;
+	spin_unlock(&enic->enic_api_lock);
+}
+
 static void enic_reset(struct work_struct *work)
 {
 	struct enic *enic = container_of(work, struct enic, reset);
@@ -2340,7 +2314,9 @@ static void enic_reset(struct work_struct *work)
 
 	rtnl_lock();
 
-	spin_lock(&enic->enic_api_lock);
+	/* Stop any activity from infiniband */
+	enic_set_api_busy(enic, true);
+
 	enic_stop(enic->netdev);
 	enic_dev_soft_reset(enic);
 	enic_reset_addr_lists(enic);
@@ -2348,7 +2324,10 @@ static void enic_reset(struct work_struct *work)
 	enic_set_rss_nic_cfg(enic);
 	enic_dev_set_ig_vlan_rewrite_mode(enic);
 	enic_open(enic->netdev);
-	spin_unlock(&enic->enic_api_lock);
+
+	/* Allow infiniband to fiddle with the device again */
+	enic_set_api_busy(enic, false);
+
 	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
 
 	rtnl_unlock();
@@ -2360,7 +2339,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
 
 	rtnl_lock();
 
-	spin_lock(&enic->enic_api_lock);
+	/* Stop any activity from infiniband */
+	enic_set_api_busy(enic, true);
+
 	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
 	enic_dev_hang_reset(enic);
@@ -2369,7 +2350,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
 	enic_set_rss_nic_cfg(enic);
 	enic_dev_set_ig_vlan_rewrite_mode(enic);
 	enic_open(enic->netdev);
-	spin_unlock(&enic->enic_api_lock);
+
+	/* Allow infiniband to fiddle with the device again */
+	enic_set_api_busy(enic, false);
+
 	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
 
 	rtnl_unlock();
@@ -2528,8 +2512,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
-	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
 	.ndo_features_check	= enic_features_check,
 };
 
@@ -2554,8 +2536,6 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
-	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
 	.ndo_features_check	= enic_features_check,
 };
 
@@ -2563,13 +2543,15 @@ static void enic_dev_deinit(struct enic *enic)
 {
 	unsigned int i;
 
-	for (i = 0; i < enic->rq_count; i++) {
-		napi_hash_del(&enic->napi[i]);
-		netif_napi_del(&enic->napi[i]);
-	}
+	for (i = 0; i < enic->rq_count; i++)
+		__netif_napi_del(&enic->napi[i]);
+
 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
 		for (i = 0; i < enic->wq_count; i++)
-			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+			__netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+
+	/* observe RCU grace period after __netif_napi_del() calls */
+	synchronize_net();
 
 	enic_free_vnic_resources(enic);
 	enic_clear_intr_mode(enic);
@@ -2651,16 +2633,17 @@ static int enic_dev_init(struct enic *enic)
 
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
-		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+		netif_napi_add(netdev, &enic->napi[0], enic_poll);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
 		for (i = 0; i < enic->rq_count; i++) {
 			netif_napi_add(netdev, &enic->napi[i],
-				       enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+				       enic_poll_msix_rq);
 		}
 		for (i = 0; i < enic->wq_count; i++)
-			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
-				       enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+			netif_napi_add(netdev,
+				       &enic->napi[enic_cq_wq(enic, i)],
+				       enic_poll_msix_wq);
 		break;
 	}
 
@@ -2735,26 +2718,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * fail to 32-bit.
 	 */
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(dev, "No usable DMA configuration, aborting\n");
 			goto err_out_release_regions;
 		}
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 32);
-			goto err_out_release_regions;
-		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
-		if (err) {
-			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 47);
-			goto err_out_release_regions;
-		}
 		using_dac = 1;
 	}
 
@@ -2965,6 +2936,13 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		patch_level = fls(patch_level);
 		patch_level = patch_level ? patch_level - 1 : 0;
 		enic->vxlan.patch_level = patch_level;
+
+		if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
+		    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
+			netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
+			if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
+				netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
+		}
 	}
 
 	netdev->features |= netdev->hw_features;
@@ -3054,17 +3032,4 @@ static struct pci_driver enic_driver = {
 	.remove = enic_remove,
 };
 
-static int __init enic_init_module(void)
-{
-	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
-	return pci_register_driver(&enic_driver);
-}
-
-static void __exit enic_cleanup_module(void)
-{
-	pci_unregister_driver(&enic_driver);
-}
-
-module_init(enic_init_module);
-module_exit(enic_cleanup_module);
+module_pci_driver(enic_driver);