commit		253239044808e0036f4e36b0347d161b893c4767
tree		b92ec20e5f7cc83b3442a7e4c97a3f21da3a80c8
parent		net: phy: microchip_t1: Clause-45 PHY loopback support for LAN887x
parent		enic: Move kdump check into enic_adjust_resources()
author		2024-11-15 15:38:48 -0800
committer	2024-11-15 15:38:48 -0800
Merge branch 'enic-use-all-the-resources-configured-on-vic'
Nelson Escobar says:
====================
enic: Use all the resources configured on VIC
Allow users to configure and use more than 8 rx queues and 8 tx queues
on the Cisco VIC.
This series changes the maximum number of tx and rx queues supported
from 8 to the hardware limit of 256, and allocates memory based on the
number of resources configured on the VIC.
v3: https://lore.kernel.org/20241108-remove_vic_resource_limits-v3-0-3ba8123bcffc@cisco.com
v2: https://lore.kernel.org/20241024-remove_vic_resource_limits-v2-0-039b8cae5fdd@cisco.com
v1: https://lore.kernel.org/20241022041707.27402-2-neescoba@cisco.com
====================
Link: https://patch.msgid.link/20241113-remove_vic_resource_limits-v4-0-a34cf8570c67@cisco.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
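
As the cover letter notes, the series sizes the driver's per-queue bookkeeping from the resource counts the VIC firmware reports instead of fixed ENIC_*_MAX arrays. A minimal sketch of that allocation pattern, mirroring enic_alloc_enic_resources() in the enic_main.c hunk below (types are those added in enic.h; the function name here is illustrative and error unwinding is collapsed for brevity):

	#include <linux/slab.h>

	/* Size the wq/rq arrays from the counts read off the VIC
	 * (enic->wq_avail/rq_avail) rather than compile-time maximums.
	 */
	static int enic_alloc_queue_arrays_sketch(struct enic *enic)
	{
		enic->wq = kcalloc(enic->wq_avail, sizeof(*enic->wq), GFP_KERNEL);
		enic->rq = kcalloc(enic->rq_avail, sizeof(*enic->rq), GFP_KERNEL);
		if (!enic->wq || !enic->rq) {
			kfree(enic->wq);
			kfree(enic->rq);
			return -ENOMEM;
		}
		return 0;
	}

The real function allocates the cq, napi, msix_entry, msix, and intr arrays the same way, and frees everything through enic_free_enic_resources() on any failure.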
Diffstat
 drivers/net/ethernet/cisco/enic/enic.h         |  62
 drivers/net/ethernet/cisco/enic/enic_ethtool.c |   8
 drivers/net/ethernet/cisco/enic/enic_main.c    | 386
 drivers/net/ethernet/cisco/enic/enic_res.c     |  42
 4 files changed, 299 insertions(+), 199 deletions(-)
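
Before reading the enic.h hunk, it helps to see the new fixed MSI-X vector layout in one place. This condensed sketch is built from the constants the diff adds; the helper enic_cq_to_msix() is illustrative only, not part of the driver:

	/* New fixed MSI-X layout (from enic.h below):
	 *
	 *   vector 0                     - error interrupt (hardware requires
	 *                                  the error vector to fit in 7 bits)
	 *   vector 1                     - notify interrupt
	 *   vector 2 .. 2 + cq_count - 1 - one I/O interrupt per completion queue
	 */
	#define ENIC_MSIX_RESERVED_INTR 2	/* error + notify */
	#define ENIC_MSIX_ERR_INTR	0
	#define ENIC_MSIX_NOTIFY_INTR	1
	#define ENIC_MSIX_IO_INTR_BASE	ENIC_MSIX_RESERVED_INTR
	/* two reserved vectors plus at least one RQ and one WQ I/O vector */
	#define ENIC_MSIX_MIN_INTR	(ENIC_MSIX_RESERVED_INTR + 2)

	/* Illustrative helper: the vector for completion queue cq no longer
	 * depends on rq_count/wq_count, so the error and notify vectors never
	 * move when the queue counts change.
	 */
	static inline unsigned int enic_cq_to_msix(unsigned int cq)
	{
		return ENIC_MSIX_IO_INTR_BASE + cq;
	}

Under the old scheme the error and notify vectors sat *after* the I/O vectors (rq_count + wq_count and rq_count + wq_count + 1), which could not scale past the 7-bit hardware limit on the error interrupt once queue counts grew.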
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0cc3644ee855..10b7e02ba4d0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -23,10 +23,8 @@
 #define ENIC_BARS_MAX		6
 
-#define ENIC_WQ_MAX		8
-#define ENIC_RQ_MAX		8
-#define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX		256
+#define ENIC_RQ_MAX		256
 
 #define ENIC_WQ_NAPI_BUDGET	256
 
@@ -162,6 +160,17 @@ struct enic_rq_stats {
 	u64 desc_skip;			/* Rx pkt went into later buffer */
 };
 
+struct enic_wq {
+	spinlock_t lock;		/* spinlock for wq */
+	struct vnic_wq vwq;
+	struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+	struct vnic_rq vrq;
+	struct enic_rq_stats stats;
+} ____cacheline_aligned;
+
 /* Per-instance private data structure */
 struct enic {
 	struct net_device *netdev;
@@ -173,8 +182,8 @@ struct enic {
 	struct work_struct reset;
 	struct work_struct tx_hang_reset;
 	struct work_struct change_mtu_work;
-	struct msix_entry msix_entry[ENIC_INTR_MAX];
-	struct enic_msix_entry msix[ENIC_INTR_MAX];
+	struct msix_entry *msix_entry;
+	struct enic_msix_entry *msix;
 	u32 msg_enable;
 	spinlock_t devcmd_lock;
 	u8 mac_addr[ETH_ALEN];
@@ -193,28 +202,25 @@ struct enic {
 	bool enic_api_busy;
 	struct enic_port_profile *pp;
 
-	/* work queue cache line section */
-	____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
-	spinlock_t wq_lock[ENIC_WQ_MAX];
-	struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
+	struct enic_wq *wq;
+	unsigned int wq_avail;
 	unsigned int wq_count;
 	u16 loop_enable;
 	u16 loop_tag;
 
-	/* receive queue cache line section */
-	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
-	struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
+	struct enic_rq *rq;
+	unsigned int rq_avail;
 	unsigned int rq_count;
 	struct vxlan_offload vxlan;
-	struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+	struct napi_struct *napi;
 
-	/* interrupt resource cache line section */
-	____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+	struct vnic_intr *intr;
+	unsigned int intr_avail;
 	unsigned int intr_count;
 	u32 __iomem *legacy_pba;		/* memory-mapped */
 
-	/* completion queue cache line section */
-	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+	struct vnic_cq *cq;
+	unsigned int cq_avail;
 	unsigned int cq_count;
 	struct enic_rfs_flw_tbl rfs_h;
 	u32 rx_copybreak;
@@ -272,18 +278,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
 	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
 }
 
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
-	return enic->rq_count + enic->wq_count;
-}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
 
 #define ENIC_LEGACY_IO_INTR	0
 #define ENIC_LEGACY_ERR_INTR	1
 #define ENIC_LEGACY_NOTIFY_INTR	2
 
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+	return ENIC_MSIX_ERR_INTR;
+}
+
 static inline unsigned int enic_msix_notify_intr(struct enic *enic)
 {
-	return enic->rq_count + enic->wq_count + 1;
+	return ENIC_MSIX_NOTIFY_INTR;
 }
 
 static inline bool enic_is_err_intr(struct enic *enic, int intr)
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 4fe85780a950..95b071153fed 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -337,7 +337,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
 	for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
 		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
 	for (i = 0; i < enic->rq_count; i++) {
-		struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+		struct enic_rq_stats *rqstats = &enic->rq[i].stats;
 		int index;
 
 		for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
@@ -346,7 +346,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
 		}
 	}
 	for (i = 0; i < enic->wq_count; i++) {
-		struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+		struct enic_wq_stats *wqstats = &enic->wq[i].stats;
 		int index;
 
 		for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
@@ -695,8 +695,8 @@ static void enic_get_channels(struct net_device *netdev,
 
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	case VNIC_DEV_INTR_MODE_MSIX:
-		channels->max_rx = ENIC_RQ_MAX;
-		channels->max_tx = ENIC_WQ_MAX;
+		channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+		channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
 		channels->rx_count = enic->rq_count;
 		channels->tx_count = enic->wq_count;
 		break;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ffed14b63d41..9913952ccb42 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -342,8 +342,8 @@ static void enic_wq_free_buf(struct vnic_wq *wq,
 {
 	struct enic *enic = vnic_dev_priv(wq->vdev);
 
-	enic->wq_stats[wq->index].cq_work++;
-	enic->wq_stats[wq->index].cq_bytes += buf->len;
+	enic->wq[wq->index].stats.cq_work++;
+	enic->wq[wq->index].stats.cq_bytes += buf->len;
 	enic_free_wq_buf(wq, buf);
 }
 
@@ -352,20 +352,20 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 {
 	struct enic *enic = vnic_dev_priv(vdev);
 
-	spin_lock(&enic->wq_lock[q_number]);
+	spin_lock(&enic->wq[q_number].lock);
 
-	vnic_wq_service(&enic->wq[q_number], cq_desc,
+	vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
 		completed_index, enic_wq_free_buf, opaque);
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
-	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
+	    vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
 		netif_wake_subqueue(enic->netdev, q_number);
-		enic->wq_stats[q_number].wake++;
+		enic->wq[q_number].stats.wake++;
 	}
 
-	spin_unlock(&enic->wq_lock[q_number]);
+	spin_unlock(&enic->wq[q_number].lock);
 
 	return 0;
 }
@@ -377,7 +377,7 @@ static bool enic_log_q_error(struct enic *enic)
 	bool err = false;
 
 	for (i = 0; i < enic->wq_count; i++) {
-		error_status = vnic_wq_error_status(&enic->wq[i]);
+		error_status = vnic_wq_error_status(&enic->wq[i].vwq);
 		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
@@ -385,7 +385,7 @@ static bool enic_log_q_error(struct enic *enic)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
-		error_status = vnic_rq_error_status(&enic->rq[i]);
+		error_status = vnic_rq_error_status(&enic->rq[i].vrq);
 		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
@@ -598,9 +598,9 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
 	/* The enic_queue_wq_desc() above does not do HW checksum */
-	enic->wq_stats[wq->index].csum_none++;
-	enic->wq_stats[wq->index].packets++;
-	enic->wq_stats[wq->index].bytes += skb->len;
+	enic->wq[wq->index].stats.csum_none++;
+	enic->wq[wq->index].stats.packets++;
+	enic->wq[wq->index].stats.bytes += skb->len;
 
 	return err;
 }
@@ -634,9 +634,9 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
 	if (!eop)
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
-	enic->wq_stats[wq->index].csum_partial++;
-	enic->wq_stats[wq->index].packets++;
-	enic->wq_stats[wq->index].bytes += skb->len;
+	enic->wq[wq->index].stats.csum_partial++;
+	enic->wq[wq->index].stats.packets++;
+	enic->wq[wq->index].stats.bytes += skb->len;
 
 	return err;
 }
@@ -699,11 +699,11 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	if (skb->encapsulation) {
 		hdr_len = skb_inner_tcp_all_headers(skb);
 		enic_preload_tcp_csum_encap(skb);
-		enic->wq_stats[wq->index].encap_tso++;
+		enic->wq[wq->index].stats.encap_tso++;
 	} else {
 		hdr_len = skb_tcp_all_headers(skb);
 		enic_preload_tcp_csum(skb);
-		enic->wq_stats[wq->index].tso++;
+		enic->wq[wq->index].stats.tso++;
 	}
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -757,8 +757,8 @@ tso_out_stats:
 	pkts = len / mss;
 	if ((len % mss) > 0)
 		pkts++;
-	enic->wq_stats[wq->index].packets += pkts;
-	enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+	enic->wq[wq->index].stats.packets += pkts;
+	enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
 
 	return 0;
 }
@@ -792,9 +792,9 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
 	if (!eop)
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
-	enic->wq_stats[wq->index].encap_csum++;
-	enic->wq_stats[wq->index].packets++;
-	enic->wq_stats[wq->index].bytes += skb->len;
+	enic->wq[wq->index].stats.encap_csum++;
+	enic->wq[wq->index].stats.packets++;
+	enic->wq[wq->index].stats.bytes += skb->len;
 
 	return err;
 }
@@ -812,7 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
 		/* VLAN tag from trunking driver */
 		vlan_tag_insert = 1;
 		vlan_tag = skb_vlan_tag_get(skb);
-		enic->wq_stats[wq->index].add_vlan++;
+		enic->wq[wq->index].stats.add_vlan++;
 	} else if (enic->loop_enable) {
 		vlan_tag = enic->loop_tag;
 		loopback = 1;
@@ -859,11 +859,11 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct netdev_queue *txq;
 
 	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
-	wq = &enic->wq[txq_map];
+	wq = &enic->wq[txq_map].vwq;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
-		enic->wq_stats[wq->index].null_pkt++;
+		enic->wq[wq->index].stats.null_pkt++;
 		return NETDEV_TX_OK;
 	}
 
@@ -878,19 +878,19 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
 	    skb_linearize(skb)) {
 		dev_kfree_skb_any(skb);
-		enic->wq_stats[wq->index].skb_linear_fail++;
+		enic->wq[wq->index].stats.skb_linear_fail++;
 		return NETDEV_TX_OK;
 	}
 
-	spin_lock(&enic->wq_lock[txq_map]);
+	spin_lock(&enic->wq[txq_map].lock);
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
 		netif_tx_stop_queue(txq);
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock(&enic->wq_lock[txq_map]);
-		enic->wq_stats[wq->index].desc_full_awake++;
+		spin_unlock(&enic->wq[txq_map].lock);
+		enic->wq[wq->index].stats.desc_full_awake++;
 		return NETDEV_TX_BUSY;
 	}
 
@@ -899,14 +899,14 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
 		netif_tx_stop_queue(txq);
-		enic->wq_stats[wq->index].stopped++;
+		enic->wq[wq->index].stats.stopped++;
 	}
 	skb_tx_timestamp(skb);
 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
 
 error:
-	spin_unlock(&enic->wq_lock[txq_map]);
+	spin_unlock(&enic->wq[txq_map].lock);
 
 	return NETDEV_TX_OK;
 }
@@ -940,10 +940,10 @@ static void enic_get_stats(struct net_device *netdev,
 	net_stats->rx_errors = stats->rx.rx_errors;
 	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
 
-	for (i = 0; i < ENIC_RQ_MAX; i++) {
-		struct enic_rq_stats *rqs = &enic->rq_stats[i];
+	for (i = 0; i < enic->rq_count; i++) {
+		struct enic_rq_stats *rqs = &enic->rq[i].stats;
 
-		if (!enic->rq->ctrl)
+		if (!enic->rq[i].vrq.ctrl)
 			break;
 		pkt_truncated += rqs->pkt_truncated;
 		bad_fcs += rqs->bad_fcs;
@@ -1313,7 +1313,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	}
 	skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!skb) {
-		enic->rq_stats[rq->index].no_skb++;
+		enic->rq[rq->index].stats.no_skb++;
 		return -ENOMEM;
 	}
 
@@ -1366,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 	struct net_device *netdev = enic->netdev;
 	struct sk_buff *skb;
 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-	struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
+	struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
 
 	u8 type, color, eop, sop, ingress_port, vlan_stripped;
 	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1512,7 +1512,7 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 {
 	struct enic *enic = vnic_dev_priv(vdev);
 
-	vnic_rq_service(&enic->rq[q_number], cq_desc,
+	vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
 		completed_index, VNIC_RQ_RETURN_DESC,
 		enic_rq_indicate_buf, opaque);
 
@@ -1609,7 +1609,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+	err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
 
 	/* Buffer allocation failed. Stay in polling
 	 * mode so we can try to fill the ring again.
@@ -1621,7 +1621,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		/* Call the function which refreshes the intr coalescing timer
 		 * value based on the traffic.
 		 */
-		enic_calc_int_moderation(enic, &enic->rq[0]);
+		enic_calc_int_moderation(enic, &enic->rq[0].vrq);
 
 	if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
 
@@ -1630,11 +1630,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		 */
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-			enic_set_int_moderation(enic, &enic->rq[0]);
+			enic_set_int_moderation(enic, &enic->rq[0].vrq);
 		vnic_intr_unmask(&enic->intr[intr]);
-		enic->rq_stats[0].napi_complete++;
+		enic->rq[0].stats.napi_complete++;
 	} else {
-		enic->rq_stats[0].napi_repoll++;
+		enic->rq[0].stats.napi_repoll++;
 	}
 
 	return rq_work_done;
@@ -1683,7 +1683,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
 	struct net_device *netdev = napi->dev;
 	struct enic *enic = netdev_priv(netdev);
 	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
-	struct vnic_wq *wq = &enic->wq[wq_index];
+	struct vnic_wq *wq = &enic->wq[wq_index].vwq;
 	unsigned int cq;
 	unsigned int intr;
 	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
@@ -1737,7 +1737,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+	err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
 
 	/* Buffer allocation failed. Stay in polling mode
 	 * so we can try to fill the ring again.
@@ -1749,7 +1749,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 		/* Call the function which refreshes the intr coalescing timer
 		 * value based on the traffic.
 		 */
-		enic_calc_int_moderation(enic, &enic->rq[rq]);
+		enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
 
 	if ((work_done < budget) && napi_complete_done(napi, work_done)) {
 
@@ -1758,11 +1758,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 		 */
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-			enic_set_int_moderation(enic, &enic->rq[rq]);
+			enic_set_int_moderation(enic, &enic->rq[rq].vrq);
 		vnic_intr_unmask(&enic->intr[intr]);
-		enic->rq_stats[rq].napi_complete++;
+		enic->rq[rq].stats.napi_complete++;
 	} else {
-		enic->rq_stats[rq].napi_repoll++;
+		enic->rq[rq].stats.napi_repoll++;
 	}
 
 	return work_done;
@@ -1792,7 +1792,7 @@ static void enic_free_intr(struct enic *enic)
 		free_irq(enic->pdev->irq, enic);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
-		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+		for (i = 0; i < enic->intr_count; i++)
 			if (enic->msix[i].requested)
 				free_irq(enic->msix_entry[i].vector,
 					enic->msix[i].devid);
@@ -1859,7 +1859,7 @@ static int enic_request_intr(struct enic *enic)
 		enic->msix[intr].isr = enic_isr_msix_notify;
 		enic->msix[intr].devid = enic;
 
-		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+		for (i = 0; i < enic->intr_count; i++)
 			enic->msix[i].requested = 0;
 
 		for (i = 0; i < enic->intr_count; i++) {
@@ -1989,10 +1989,10 @@ static int enic_open(struct net_device *netdev)
 
 	for (i = 0; i < enic->rq_count; i++) {
 		/* enable rq before updating rq desc */
-		vnic_rq_enable(&enic->rq[i]);
-		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+		vnic_rq_enable(&enic->rq[i].vrq);
+		vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
-		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+		if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
 			netdev_err(netdev, "Unable to alloc receive buffers\n");
 			err = -ENOMEM;
 			goto err_out_free_rq;
@@ -2000,7 +2000,7 @@ static int enic_open(struct net_device *netdev)
 	}
 
 	for (i = 0; i < enic->wq_count; i++)
-		vnic_wq_enable(&enic->wq[i]);
+		vnic_wq_enable(&enic->wq[i].vwq);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
@@ -2027,9 +2027,9 @@ static int enic_open(struct net_device *netdev)
 
 err_out_free_rq:
 	for (i = 0; i < enic->rq_count; i++) {
-		ret = vnic_rq_disable(&enic->rq[i]);
+		ret = vnic_rq_disable(&enic->rq[i].vrq);
 		if (!ret)
-			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+			vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
 	}
 	enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2071,12 +2071,12 @@ static int enic_stop(struct net_device *netdev)
 	enic_dev_del_station_addr(enic);
 
 	for (i = 0; i < enic->wq_count; i++) {
-		err = vnic_wq_disable(&enic->wq[i]);
+		err = vnic_wq_disable(&enic->wq[i].vwq);
 		if (err)
 			return err;
 	}
 	for (i = 0; i < enic->rq_count; i++) {
-		err = vnic_rq_disable(&enic->rq[i]);
+		err = vnic_rq_disable(&enic->rq[i].vrq);
 		if (err)
 			return err;
 	}
@@ -2086,9 +2086,9 @@ static int enic_stop(struct net_device *netdev)
 	enic_free_intr(enic);
 
 	for (i = 0; i < enic->wq_count; i++)
-		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+		vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
 	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+		vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
 	for (i = 0; i < enic->cq_count; i++)
 		vnic_cq_clean(&enic->cq[i]);
 	for (i = 0; i < enic->intr_count; i++)
@@ -2442,112 +2442,56 @@ static void enic_tx_hang_reset(struct work_struct *work)
 
 static int enic_set_intr_mode(struct enic *enic)
 {
-	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
-	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
 	unsigned int i;
+	int num_intr;
 
 	/* Set interrupt mode (INTx, MSI, MSI-X) depending
 	 * on system capabilities.
 	 *
 	 * Try MSI-X first
-	 *
-	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
-	 * (the second to last INTR is used for WQ/RQ errors)
-	 * (the last INTR is used for notifications)
 	 */
-
-	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
-	for (i = 0; i < n + m + 2; i++)
-		enic->msix_entry[i].entry = i;
-
-	/* Use multiple RQs if RSS is enabled */
-	if (ENIC_SETTING(enic, RSS) &&
-	    enic->config.intr_mode < 1 &&
-	    enic->rq_count >= n &&
-	    enic->wq_count >= m &&
-	    enic->cq_count >= n + m &&
-	    enic->intr_count >= n + m + 2) {
-
-		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
-					  n + m + 2, n + m + 2) > 0) {
-
-			enic->rq_count = n;
-			enic->wq_count = m;
-			enic->cq_count = n + m;
-			enic->intr_count = n + m + 2;
-
-			vnic_dev_set_intr_mode(enic->vdev,
-				VNIC_DEV_INTR_MODE_MSIX);
-
-			return 0;
-		}
-	}
-
 	if (enic->config.intr_mode < 1 &&
-	    enic->rq_count >= 1 &&
-	    enic->wq_count >= m &&
-	    enic->cq_count >= 1 + m &&
-	    enic->intr_count >= 1 + m + 2) {
-		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
-					  1 + m + 2, 1 + m + 2) > 0) {
-
-			enic->rq_count = 1;
-			enic->wq_count = m;
-			enic->cq_count = 1 + m;
-			enic->intr_count = 1 + m + 2;
-
+	    enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
+		for (i = 0; i < enic->intr_avail; i++)
+			enic->msix_entry[i].entry = i;
+
+		num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
+						 ENIC_MSIX_MIN_INTR,
+						 enic->intr_avail);
+		if (num_intr > 0) {
 			vnic_dev_set_intr_mode(enic->vdev,
-				VNIC_DEV_INTR_MODE_MSIX);
-
+					       VNIC_DEV_INTR_MODE_MSIX);
+			enic->intr_avail = num_intr;
 			return 0;
 		}
 	}
 
 	/* Next try MSI
 	 *
-	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+	 * We need 1 INTR
 	 */
 
 	if (enic->config.intr_mode < 2 &&
-	    enic->rq_count >= 1 &&
-	    enic->wq_count >= 1 &&
-	    enic->cq_count >= 2 &&
-	    enic->intr_count >= 1 &&
+	    enic->intr_avail >= 1 &&
 	    !pci_enable_msi(enic->pdev)) {
-
-		enic->rq_count = 1;
-		enic->wq_count = 1;
-		enic->cq_count = 2;
-		enic->intr_count = 1;
-
+		enic->intr_avail = 1;
 		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
-
 		return 0;
 	}
 
 	/* Next try INTx
 	 *
-	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+	 * We need 3 INTRs
 	 * (the first INTR is used for WQ/RQ)
 	 * (the second INTR is used for WQ/RQ errors)
 	 * (the last INTR is used for notifications)
 	 */
 
 	if (enic->config.intr_mode < 3 &&
-	    enic->rq_count >= 1 &&
-	    enic->wq_count >= 1 &&
-	    enic->cq_count >= 2 &&
-	    enic->intr_count >= 3) {
-
-		enic->rq_count = 1;
-		enic->wq_count = 1;
-		enic->cq_count = 2;
-		enic->intr_count = 3;
-
+	    enic->intr_avail >= 3) {
+		enic->intr_avail = 3;
 		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
-
 		return 0;
 	}
 
@@ -2572,11 +2516,81 @@ static void enic_clear_intr_mode(struct enic *enic)
 	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
 }
 
+static int enic_adjust_resources(struct enic *enic)
+{
+	unsigned int max_queues;
+	unsigned int rq_default;
+	unsigned int rq_avail;
+	unsigned int wq_avail;
+
+	if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
+		dev_err(enic_get_dev(enic),
+			"Not enough resources available rq: %d wq: %d cq: %d\n",
+			enic->rq_avail, enic->wq_avail,
+			enic->cq_avail);
+		return -ENOSPC;
+	}
+
+	if (is_kdump_kernel()) {
+		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+		enic->rq_avail = 1;
+		enic->wq_avail = 1;
+		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+	}
+
+	/* if RSS isn't set, then we can only use one RQ */
+	if (!ENIC_SETTING(enic, RSS))
+		enic->rq_avail = 1;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->cq_count = 2;
+		enic->intr_count = enic->intr_avail;
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		/* Adjust the number of wqs/rqs/cqs/interrupts that will be
+		 * used based on which resource is the most constrained
+		 */
+		wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
+		rq_default = netif_get_num_default_rss_queues();
+		rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
+		max_queues = min(enic->cq_avail,
+				 enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
+		if (wq_avail + rq_avail <= max_queues) {
+			enic->rq_count = rq_avail;
+			enic->wq_count = wq_avail;
+		} else {
+			/* recalculate wq/rq count */
+			if (rq_avail < wq_avail) {
+				enic->rq_count = min(rq_avail, max_queues / 2);
+				enic->wq_count = max_queues - enic->rq_count;
+			} else {
+				enic->wq_count = min(wq_avail, max_queues / 2);
+				enic->rq_count = max_queues - enic->wq_count;
+			}
+		}
+		enic->cq_count = enic->rq_count + enic->wq_count;
+		enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
+
+		break;
+	default:
+		dev_err(enic_get_dev(enic), "Unknown interrupt mode\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
 				    struct netdev_queue_stats_rx *rxs)
 {
 	struct enic *enic = netdev_priv(dev);
-	struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+	struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
 
 	rxs->bytes = rqstats->bytes;
 	rxs->packets = rqstats->packets;
@@ -2590,7 +2604,7 @@ static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
 				    struct netdev_queue_stats_tx *txs)
 {
 	struct enic *enic = netdev_priv(dev);
-	struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+	struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
 
 	txs->bytes = wqstats->bytes;
 	txs->packets = wqstats->packets;
@@ -2674,6 +2688,71 @@ static const struct netdev_stat_ops enic_netdev_stat_ops = {
 	.get_base_stats = enic_get_base_stats,
 };
 
+static void enic_free_enic_resources(struct enic *enic)
+{
+	kfree(enic->wq);
+	enic->wq = NULL;
+
+	kfree(enic->rq);
+	enic->rq = NULL;
+
+	kfree(enic->cq);
+	enic->cq = NULL;
+
+	kfree(enic->napi);
+	enic->napi = NULL;
+
+	kfree(enic->msix_entry);
+	enic->msix_entry = NULL;
+
+	kfree(enic->msix);
+	enic->msix = NULL;
+
+	kfree(enic->intr);
+	enic->intr = NULL;
+}
+
+static int enic_alloc_enic_resources(struct enic *enic)
+{
+	enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
+	if (!enic->wq)
+		goto free_queues;
+
+	enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
+	if (!enic->rq)
+		goto free_queues;
+
+	enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
+	if (!enic->cq)
+		goto free_queues;
+
+	enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
+			     sizeof(struct napi_struct), GFP_KERNEL);
+	if (!enic->napi)
+		goto free_queues;
+
+	enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
+				   GFP_KERNEL);
+	if (!enic->msix_entry)
+		goto free_queues;
+
+	enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
+			     GFP_KERNEL);
+	if (!enic->msix)
+		goto free_queues;
+
+	enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
+			     GFP_KERNEL);
+	if (!enic->intr)
+		goto free_queues;
+
+	return 0;
+
+free_queues:
+	enic_free_enic_resources(enic);
+	return -ENOMEM;
+}
+
 static void enic_dev_deinit(struct enic *enic)
 {
 	unsigned int i;
@@ -2691,18 +2770,7 @@ static void enic_dev_deinit(struct enic *enic)
 	enic_free_vnic_resources(enic);
 	enic_clear_intr_mode(enic);
 	enic_free_affinity_hint(enic);
-}
-
-static void enic_kdump_kernel_config(struct enic *enic)
-{
-	if (is_kdump_kernel()) {
-		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
-		enic->rq_count = 1;
-		enic->wq_count = 1;
-		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
-		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
-		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
-	}
+	enic_free_enic_resources(enic);
 }
 
 static int enic_dev_init(struct enic *enic)
@@ -2734,19 +2802,26 @@ static int enic_dev_init(struct enic *enic)
 
 	enic_get_res_counts(enic);
 
-	/* modify resource count if we are in kdump_kernel
-	 */
-	enic_kdump_kernel_config(enic);
+	err = enic_alloc_enic_resources(enic);
+	if (err) {
+		dev_err(dev, "Failed to allocate enic resources\n");
+		return err;
+	}
 
-	/* Set interrupt mode based on resource counts and system
-	 * capabilities
-	 */
+	/* Set interrupt mode based on system capabilities */
 	err = enic_set_intr_mode(enic);
 	if (err) {
 		dev_err(dev, "Failed to set intr mode based on resource "
 			"counts and system capabilities, aborting\n");
-		return err;
+		goto err_out_free_vnic_resources;
+	}
+
+	/* Adjust resource counts based on most constrained resources */
+	err = enic_adjust_resources(enic);
+	if (err) {
+		dev_err(dev, "Failed to adjust resources\n");
+		goto err_out_free_vnic_resources;
 	}
 
 	/* Allocate and configure vNIC resources
@@ -2788,6 +2863,7 @@ err_out_free_vnic_resources:
 	enic_free_affinity_hint(enic);
 	enic_clear_intr_mode(enic);
 	enic_free_vnic_resources(enic);
+	enic_free_enic_resources(enic);
 
 	return err;
 }
@@ -2993,7 +3069,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
 	for (i = 0; i < enic->wq_count; i++)
-		spin_lock_init(&enic->wq_lock[i]);
+		spin_lock_init(&enic->wq[i].lock);
 
 	/* Register net device */
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 1c48aebdbab0..126125199833 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -176,9 +176,9 @@ void enic_free_vnic_resources(struct enic *enic)
 	unsigned int i;
 
 	for (i = 0; i < enic->wq_count; i++)
-		vnic_wq_free(&enic->wq[i]);
+		vnic_wq_free(&enic->wq[i].vwq);
 	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_free(&enic->rq[i]);
+		vnic_rq_free(&enic->rq[i].vrq);
 	for (i = 0; i < enic->cq_count; i++)
 		vnic_cq_free(&enic->cq[i]);
 	for (i = 0; i < enic->intr_count; i++)
@@ -187,16 +187,21 @@ void enic_free_vnic_resources(struct enic *enic)
 
 void enic_get_res_counts(struct enic *enic)
 {
-	enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
-	enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
-	enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
-	enic->intr_count = vnic_dev_get_res_count(enic->vdev,
-		RES_TYPE_INTR_CTRL);
+	enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+	enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+	enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+	enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+						  RES_TYPE_INTR_CTRL);
+
+	enic->wq_count = enic->wq_avail;
+	enic->rq_count = enic->rq_avail;
+	enic->cq_count = enic->cq_avail;
+	enic->intr_count = enic->intr_avail;
 
 	dev_info(enic_get_dev(enic),
 		"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
-		enic->wq_count, enic->rq_count,
-		enic->cq_count, enic->intr_count);
+		enic->wq_avail, enic->rq_avail,
+		enic->cq_avail, enic->intr_avail);
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -221,9 +226,12 @@ void enic_init_vnic_resources(struct enic *enic)
 
 	switch (intr_mode) {
 	case VNIC_DEV_INTR_MODE_INTX:
+		error_interrupt_enable = 1;
+		error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
 		error_interrupt_enable = 1;
-		error_interrupt_offset = enic->intr_count - 2;
+		error_interrupt_offset = enic_msix_err_intr(enic);
 		break;
 	default:
 		error_interrupt_enable = 0;
@@ -233,7 +241,7 @@ void enic_init_vnic_resources(struct enic *enic)
 
 	for (i = 0; i < enic->rq_count; i++) {
 		cq_index = i;
-		vnic_rq_init(&enic->rq[i],
+		vnic_rq_init(&enic->rq[i].vrq,
 			cq_index,
 			error_interrupt_enable,
 			error_interrupt_offset);
@@ -241,7 +249,7 @@ void enic_init_vnic_resources(struct enic *enic)
 
 	for (i = 0; i < enic->wq_count; i++) {
 		cq_index = enic->rq_count + i;
-		vnic_wq_init(&enic->wq[i],
+		vnic_wq_init(&enic->wq[i].vwq,
 			cq_index,
 			error_interrupt_enable,
 			error_interrupt_offset);
@@ -249,15 +257,15 @@ void enic_init_vnic_resources(struct enic *enic)
 
 	/* Init CQ resources
 	 *
-	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
-	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+	 * All CQs point to INTR[0] for INTx, MSI
+	 * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
 	 */
 
 	for (i = 0; i < enic->cq_count; i++) {
 		switch (intr_mode) {
 		case VNIC_DEV_INTR_MODE_MSIX:
-			interrupt_offset = i;
+			interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
 			break;
 		default:
 			interrupt_offset = 0;
@@ -322,7 +330,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
 	 */
 
 	for (i = 0; i < enic->wq_count; i++) {
-		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+		err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
 			enic->config.wq_desc_count,
 			sizeof(struct wq_enet_desc));
 		if (err)
@@ -330,7 +338,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
-		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+		err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
 			enic->config.rq_desc_count,
 			sizeof(struct rq_enet_desc));
 		if (err)
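
The queue-balancing arithmetic in enic_adjust_resources() is the subtle part of the series. Below is a standalone C model of just the MSI-X branch, useful for sanity-checking how a given VIC configuration will be split. balance_queues() and min_u() are illustrative stand-ins; the driver itself uses min()/min3() and netif_get_num_default_rss_queues() for rq_default.

	#include <stdio.h>

	#define ENIC_WQ_MAX 256
	#define ENIC_RQ_MAX 256
	#define ENIC_MSIX_RESERVED_INTR 2	/* error + notify vectors */

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	/* Mirrors the MSI-X branch of enic_adjust_resources(): fit rq + wq
	 * queues into whichever resource is most constrained (CQs, or the
	 * interrupts left after the two reserved vectors).
	 */
	static void balance_queues(unsigned int wq_avail, unsigned int rq_avail,
				   unsigned int cq_avail, unsigned int intr_avail,
				   unsigned int rq_default,
				   unsigned int *rq_count, unsigned int *wq_count)
	{
		unsigned int wq = min_u(wq_avail, ENIC_WQ_MAX);
		unsigned int rq = min_u(min_u(rq_avail, ENIC_RQ_MAX), rq_default);
		unsigned int max_queues =
			min_u(cq_avail, intr_avail - ENIC_MSIX_RESERVED_INTR);

		if (wq + rq <= max_queues) {
			*rq_count = rq;
			*wq_count = wq;
		} else if (rq < wq) {
			*rq_count = min_u(rq, max_queues / 2);
			*wq_count = max_queues - *rq_count;
		} else {
			*wq_count = min_u(wq, max_queues / 2);
			*rq_count = max_queues - *wq_count;
		}
	}

	int main(void)
	{
		unsigned int rq_count, wq_count;

		/* e.g. a VIC configured with 64 WQs/64 RQs but only 18
		 * interrupts: 16 vectors remain for I/O, so the queues
		 * are split 8/8.
		 */
		balance_queues(64, 64, 128, 18, 64, &rq_count, &wq_count);
		printf("rq %u wq %u\n", rq_count, wq_count);	/* rq 8 wq 8 */
		return 0;
	}

Giving the more constrained side at most half of max_queues, and the remainder to the other side, keeps the split fair when the configuration is oversubscribed while still using every available completion queue and interrupt.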