Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c233
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c127
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c138
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h7
22 files changed, 447 insertions, 408 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fe140ff38f74..a0723831c4e4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -284,6 +284,10 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+ bool devlink_port_registered;
+
u16 max_frame;
u16 rx_buf_len;
@@ -321,9 +325,9 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ struct xsk_buff_pool **xsk_pools;
+ u16 num_xsk_pools_used;
+ u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -375,9 +379,6 @@ enum ice_pf_flags {
struct ice_pf {
struct pci_dev *pdev;
- /* devlink port data */
- struct devlink_port devlink_port;
-
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
@@ -507,25 +508,25 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
}
/**
- * ice_xsk_umem - get XDP UMEM bound to a ring
- * @ring - ring to use
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: ring to use
*
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to the xsk_buff_pool structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xdp_umem **umems = ring->vsi->xsk_umems;
+ struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return umems[qid];
+ return pools[qid];
}
/**
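A quick illustration of the qid rebasing in ice_xsk_pool() above (hypothetical queue layout, not taken from the patch):

	/* With 4 Rx/Tx queue pairs and num_xdp_txq = 4, the XDP Tx rings
	 * occupy q_index 4..7. An XDP ring with q_index 6 is rebased to
	 * qid 6 - 4 = 2 and therefore shares xsk_pools[2] with Rx/Tx
	 * queue pair 2.
	 */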
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ba9375218fef..b06fbe99d8e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1422,7 +1422,7 @@ struct ice_aqc_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/**
+/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
@@ -1826,8 +1826,8 @@ struct ice_aqc_event_lan_overflow {
* @opcode: AQ command opcode
* @datalen: length in bytes of indirect/external data buffer
* @retval: return value from firmware
- * @cookie_h: opaque data high-half
- * @cookie_l: opaque data low-half
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
- ring->xsk_umem = ice_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets spanning
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
- dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ if (ring->xsk_pool) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
err = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err)
- dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 111d6bfe4222..511da59bd6f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,18 +6,14 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
- dsn[0], dsn[1], dsn[2], dsn[3],
- dsn[4], dsn[5], dsn[6], dsn[7]);
-
- return 0;
+ snprintf(buf, len, "%8phD", dsn);
}
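A hedged note on the printk extension adopted above: %phD prints a byte array as dash-separated hex (see Documentation/core-api/printk-formats.rst), so the one-liner is expected to produce the same output as the removed format string.

	/* Illustration (assumed output): for
	 *   u8 dsn[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
	 * snprintf(buf, len, "%8phD", dsn) fills buf with
	 *   "00-01-02-03-04-05-06-07"
	 * matching the eight chained "%02x" conversions it replaces.
	 */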
static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
@@ -106,6 +102,13 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+{
+ snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+
+ return 0;
+}
+
static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
@@ -150,6 +153,7 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
running("fw.netlist", ice_info_netlist_ver),
running("fw.netlist.build", ice_info_netlist_build),
};
@@ -180,11 +184,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
return err;
}
- err = ice_info_get_dsn(pf, buf, sizeof(buf));
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
- return err;
- }
+ ice_info_get_dsn(pf, buf, sizeof(buf));
err = devlink_info_serial_number_put(req, buf);
if (err) {
@@ -233,8 +233,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
/**
* ice_devlink_flash_update - Update firmware stored in flash on the device
* @devlink: pointer to devlink associated with device to update
- * @path: the path of the firmware file to use via request_firmware
- * @component: name of the component to update, or NULL
+ * @params: flash update parameters
* @extack: netlink extended ACK structure
*
* Perform a device flash update. The bulk of the update logic is contained
@@ -243,38 +242,52 @@ static int ice_devlink_info_get(struct devlink *devlink,
* Returns: zero on success, or an error code on failure.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
const struct firmware *fw;
+ u8 preservation;
int err;
- /* individual component update is not yet supported */
- if (component)
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
return -EOPNOTSUPP;
+ }
if (!hw->dev_caps.common_cap.nvm_unified_update) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
- err = ice_check_for_pending_update(pf, component, extack);
+ err = ice_check_for_pending_update(pf, NULL, extack);
if (err)
return err;
- err = request_firmware(&fw, path, dev);
+ err = request_firmware(&fw, params->file_name, dev);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
return err;
}
+ dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);
+
devlink_flash_update_begin_notify(devlink);
- devlink_flash_update_status_notify(devlink, "Preparing to flash",
- component, 0, 0);
- err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, preservation, extack);
devlink_flash_update_end_notify(devlink);
release_firmware(fw);
@@ -283,6 +296,7 @@ ice_devlink_flash_update(struct devlink *devlink, const char *path,
}
static const struct devlink_ops ice_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
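For orientation, a sketch of how the new overwrite mask is expected to reach this handler (illustrative only; the devlink CLI syntax is assumed from the overwrite-mask series, not shown in this patch):

	/* "devlink dev flash pci/0000:01:00.0 file fw.bin overwrite settings"
	 * sets DEVLINK_FLASH_OVERWRITE_SETTINGS in params->overwrite_mask,
	 * which ice_devlink_flash_update() maps to
	 * ICE_AQC_NVM_PRESERVE_SELECTED; omitting the overwrite keyword
	 * leaves the mask empty and selects ICE_AQC_NVM_PRESERVE_ALL.
	 */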
@@ -352,55 +366,66 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
- * ice_devlink_create_port - Create a devlink port for this PF
- * @pf: the PF to create a port for
+ * ice_devlink_create_port - Create a devlink port for this VSI
+ * @vsi: the VSI to create a port for
*
- * Create and register a devlink_port for this PF. Note that although each
- * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function ID.
+ * Create and register a devlink_port for this VSI.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_pf *pf)
+int ice_devlink_create_port(struct ice_vsi *vsi)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct device *dev = ice_pf_to_dev(pf);
struct devlink_port_attrs attrs = {};
+ struct ice_port_info *pi;
+ struct devlink *devlink;
+ struct device *dev;
+ struct ice_pf *pf;
int err;
- if (!vsi) {
- dev_err(dev, "%s: unable to find main VSI\n", __func__);
- return -EIO;
- }
+ /* Currently we only create devlink_port instances for PF VSIs */
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ devlink = priv_to_devlink(pf);
+ dev = ice_pf_to_dev(pf);
+ pi = pf->hw.port_info;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.pf_id;
- devlink_port_attrs_set(&pf->devlink_port, &attrs);
- err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ attrs.phys.port_number = pi->lport;
+ devlink_port_attrs_set(&vsi->devlink_port, &attrs);
+ err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
return err;
}
+ vsi->devlink_port_registered = true;
+
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
+ * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
+ * @vsi: the VSI to cleanup
*
- * Unregisters the devlink_port structure associated with this PF.
+ * Unregisters the devlink_port structure associated with this VSI.
*/
-void ice_devlink_destroy_port(struct ice_pf *pf)
+void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
- devlink_port_type_clear(&pf->devlink_port);
- devlink_port_unregister(&pf->devlink_port);
+ if (!vsi->devlink_port_registered)
+ return;
+
+ devlink_port_type_clear(&vsi->devlink_port);
+ devlink_port_unregister(&vsi->devlink_port);
+
+ vsi->devlink_port_registered = false;
}
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -413,6 +438,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
@@ -456,6 +482,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -468,6 +495,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6e806a08dc23..e07e74426bde 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,8 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_pf *pf);
-void ice_devlink_destroy_port(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_vsi *vsi);
+void ice_devlink_destroy_port(struct ice_vsi *vsi);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d7430ce6af26..2d27f66ac853 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,8 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
- &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1647,8 +1646,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
- 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 6834df14332f..59c0c6a0f8c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -556,7 +556,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index b17ae3e20157..9095b4d274ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -489,8 +489,6 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].in_use = false;
- hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
@@ -505,8 +503,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
+ if (hw->tnl.tbl[i].boost_entry) {
hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
}
}
@@ -1626,104 +1627,59 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
}
/**
- * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
+ * @port: returns open port
*/
-static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
+ bool res = false;
u16 i;
+ mutex_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
- if (index)
- *index = i;
- return true;
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
}
- return false;
-}
-
-/**
- * ice_tunnel_port_in_use
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
- */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
-{
- bool res;
-
- mutex_lock(&hw->tnl_lock);
- res = ice_tunnel_port_in_use_hlpr(hw, port, index);
mutex_unlock(&hw->tnl_lock);
return res;
}
/**
- * ice_find_free_tunnel_entry
+ * ice_tunnel_idx_to_entry - convert a linear index to a sparse table entry
* @hw: pointer to the HW structure
- * @type: tunnel type
- * @index: optionally returns index
+ * @type: type of tunnel
+ * @idx: linear index
*
- * Returns whether there is a free tunnel entry, and optionally its index
+ * The stack assumes we have two linear tables with indexes [0, valid_count),
+ * but really the port table may be sparse, and the types are mixed, so convert
+ * the stack index into the device index.
*/
-static bool
-ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *index)
+static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 idx)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
- hw->tnl.tbl[i].type == type) {
- if (index)
- *index = i;
- return true;
- }
+ if (hw->tnl.tbl[i].valid &&
+ hw->tnl.tbl[i].type == type &&
+ idx-- == 0)
+ return i;
- return false;
-}
-
-/**
- * ice_get_open_tunnel_port - retrieve an open tunnel port
- * @hw: pointer to the HW structure
- * @type: tunnel type (TNL_ALL will return any open port)
- * @port: returns open port
- */
-bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port)
-{
- bool res = false;
- u16 i;
-
- mutex_lock(&hw->tnl_lock);
-
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
- *port = hw->tnl.tbl[i].port;
- res = true;
- break;
- }
-
- mutex_unlock(&hw->tnl_lock);
-
- return res;
+ WARN_ON_ONCE(1);
+ return 0;
}
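A worked example of the conversion, using hypothetical table contents:

	/* Suppose the device table holds, in order:
	 *   tbl[0] = { .valid = 1, .type = TNL_VXLAN  }
	 *   tbl[1] = { .valid = 1, .type = TNL_GENEVE }
	 *   tbl[2] = { .valid = 1, .type = TNL_VXLAN  }
	 * The stack's VXLAN table is linear over two slots, so:
	 *   ice_tunnel_idx_to_entry(hw, TNL_VXLAN, 0) returns 0
	 *   ice_tunnel_idx_to_entry(hw, TNL_VXLAN, 1) returns 2
	 *   (the GENEVE entry at tbl[1] is skipped)
	 */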
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
* @type: type of tunnel
* @port: port of tunnel to create
*
@@ -1731,27 +1687,16 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+static enum ice_status
+ice_create_tunnel(struct ice_hw *hw, u16 index,
+ enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 index;
mutex_lock(&hw->tnl_lock);
- if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
- hw->tnl.tbl[index].ref++;
- status = 0;
- goto ice_create_tunnel_end;
- }
-
- if (!ice_find_free_tunnel_entry(hw, type, &index)) {
- status = ICE_ERR_OUT_OF_RANGE;
- goto ice_create_tunnel_end;
- }
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1790,11 +1735,8 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
- if (!status) {
+ if (!status)
hw->tnl.tbl[index].port = port;
- hw->tnl.tbl[index].in_use = true;
- hw->tnl.tbl[index].ref = 1;
- }
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1808,46 +1750,31 @@ ice_create_tunnel_end:
/**
* ice_destroy_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
+ * @type: type of tunnel
* @port: port of tunnel to destroy
- * @all: flag that states to destroy all tunnels
*
* Destroys a tunnel by creating an update package buffer
* targeting the specific updates requested and then performing an update
* package.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+static enum ice_status
+ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
+ u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 count = 0;
- u16 index;
- u16 size;
- u16 i;
mutex_lock(&hw->tnl_lock);
- if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
- if (hw->tnl.tbl[index].ref > 1) {
- hw->tnl.tbl[index].ref--;
- status = 0;
- goto ice_destroy_tunnel_end;
- }
-
- /* determine count */
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port))
- count++;
-
- if (!count) {
- status = ICE_ERR_PARAM;
+ if (WARN_ON(!hw->tnl.tbl[index].valid ||
+ hw->tnl.tbl[index].type != type ||
+ hw->tnl.tbl[index].port != port)) {
+ status = ICE_ERR_OUT_OF_RANGE;
goto ice_destroy_tunnel_end;
}
- /* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count);
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1859,13 +1786,13 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
goto ice_destroy_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1873,26 +1800,14 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port)) {
- memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_rx->tcam));
- memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_tx->tcam));
- hw->tnl.tbl[i].marked = true;
- }
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status)
- for (i = 0; i < hw->tnl.count &&
- i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].marked) {
- hw->tnl.tbl[i].ref = 0;
- hw->tnl.tbl[i].port = 0;
- hw->tnl.tbl[i].in_use = false;
- hw->tnl.tbl[i].marked = false;
- }
+ hw->tnl.tbl[index].port = 0;
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1903,6 +1818,52 @@ ice_destroy_tunnel_end:
return status;
}
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+ u16 index;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
+
+ status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
+ return 0;
+}
+
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+
+ status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
+ ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error removing UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
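One connective note on the pair of callbacks above, sketching the core contract this patch relies on:

	/* ice_udp_tunnel_set_port() stores the device table index via
	 * udp_tunnel_nic_set_port_priv(netdev, table, idx, index); the core
	 * hands that value back as ti->hw_priv when the port is removed,
	 * which is why ice_udp_tunnel_unset_port() can pass ti->hw_priv
	 * straight to ice_destroy_tunnel() as the table entry.
	 */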
/* PTG Management */
/**
@@ -4915,7 +4876,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 568ea519af51..20deddb807c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -19,12 +19,11 @@
#define ICE_PKG_CNT 4
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port);
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index c1c99a267a98..24063c1351b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -298,6 +298,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ __TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -311,11 +312,8 @@ struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
- u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
- u8 in_use;
- u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
@@ -323,6 +321,7 @@ struct ice_tunnel_entry {
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
+ u16 valid_count[__TNL_TYPE_CNT];
};
struct ice_pkg_es {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fe677621dd51..eadc85aee389 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -99,6 +99,54 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* UDP Packet types for non-tunneled packets or tunneled
* packets with inner UDP.
*/
@@ -250,11 +298,23 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
+ (const unsigned long *)ice_ipv4_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
(const unsigned long *)ice_ptypes_ipv4_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
+ (const unsigned long *)ice_ipv6_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
(const unsigned long *)ice_ptypes_ipv6_il;
@@ -385,7 +445,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
@@ -999,7 +1059,7 @@ enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 3913da2116d2..829f90b1e998 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -194,8 +194,8 @@ struct ice_flow_entry {
u16 entry_sz;
};
-#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)(e))
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
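Why the intermediate cast matters, in a minimal sketch (hypothetical variables):

	/* struct ice_flow_entry *e = ...;
	 * u64 handle = ICE_FLOW_ENTRY_HNDL(e);  // pointer -> uintptr_t -> u64
	 * e = ICE_FLOW_ENTRY_PTR(handle);       // u64 -> uintptr_t -> pointer
	 * Round-tripping through uintptr_t keeps 32-bit builds free of
	 * cast-to/from-integer-of-different-size warnings, since a pointer
	 * there is narrower than u64.
	 */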
struct ice_flow_prof {
struct list_head l_entry;
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8968fdd4816b..8f81b95e679c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -43,6 +43,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
enum ice_status status;
u8 *package_data;
+ dev_dbg(dev, "Sending PLDM record package data to firmware\n");
+
package_data = kmemdup(data, length, GFP_KERNEL);
if (!package_data)
return -ENOMEM;
@@ -229,6 +231,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
comp_tbl->cvs_len = component->version_len;
memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+ dev_dbg(dev, "Sending component table to firmware:\n");
+
status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
transfer_flag, &comp_response,
&comp_response_code, NULL);
@@ -279,11 +283,14 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
memset(&event, 0, sizeof(event));
+ dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
+ block_size, module, offset);
+
status = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (status) {
- dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
- module, offset, ice_stat_str(status),
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
+ module, block_size, offset, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -297,8 +304,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
if (err) {
- dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
- module, err);
+ dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
+ module, block_size, offset, err);
NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
return -EIO;
}
@@ -324,8 +331,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
- module, offset,
+ dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
+ module, block_size, offset,
ice_aq_str((enum ice_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
@@ -356,12 +363,15 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
const u8 *image, u32 length,
struct netlink_ext_ack *extack)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct devlink *devlink;
u32 offset = 0;
bool last_cmd;
u8 *block;
int err;
+ dev_dbg(dev, "Beginning write of flash component '%s', module 0x%02x\n", component, module);
+
devlink = priv_to_devlink(pf);
devlink_flash_update_status_notify(devlink, "Flashing",
@@ -394,6 +404,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
component, offset, length);
} while (!last_cmd);
+ dev_dbg(dev, "Completed write of flash component '%s', module 0x%02x\n", component, module);
+
if (err)
devlink_flash_update_status_notify(devlink, "Flashing failed",
component, length, length);
@@ -431,6 +443,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
enum ice_status status;
int err;
+ dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
+
memset(&event, 0, sizeof(event));
devlink = priv_to_devlink(pf);
@@ -476,6 +490,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
+ dev_dbg(dev, "Completed erase of flash component '%s', module 0x%02x\n", component, module);
+
out_notify_devlink:
if (err)
devlink_flash_update_status_notify(devlink, "Erasing failed",
@@ -614,14 +630,9 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
- int err;
/* Finally, notify firmware to activate the written NVM banks */
- err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
- if (err)
- return err;
-
- return 0;
+ return ice_switch_flash_banks(pf, priv->activate_flags, extack);
}
static const struct pldmfw_ops ice_fwu_ops = {
@@ -636,6 +647,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
* @pf: private device driver structure
* @fw: firmware object pointing to the relevant firmware file
+ * @preservation: preservation level to request from firmware
* @extack: netlink extended ACK structure
*
* Parse the data for a given firmware file, verifying that it is a valid PLDM
@@ -649,7 +661,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* Returns: zero on success or a negative error code on failure.
*/
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack)
+ u8 preservation, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -657,13 +669,24 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
enum ice_status status;
int err;
+ switch (preservation) {
+ case ICE_AQC_NVM_PRESERVE_ALL:
+ case ICE_AQC_NVM_PRESERVE_SELECTED:
+ case ICE_AQC_NVM_NO_PRESERVATION:
+ case ICE_AQC_NVM_FACTORY_DEFAULT:
+ break;
+ default:
+ WARN(1, "Unexpected preservation level request %u", preservation);
+ return -EINVAL;
+ }
+
memset(&priv, 0, sizeof(priv));
priv.context.ops = &ice_fwu_ops;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
- priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+ priv.activate_flags = preservation;
status = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 79472cc618b4..c6390f6851ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -5,7 +5,7 @@
#define _ICE_FW_UPDATE_H_
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack);
+ u8 preservation, struct netlink_ext_ack *extack);
int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ebbb8f54871c..3df67486d42d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_devlink.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -1755,7 +1756,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
return ret;
for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+ vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -2616,8 +2617,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
unregister_netdev(vsi->netdev);
+ ice_devlink_destroy_port(vsi);
+ }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 54a7f55eb8c1..2dea4d0e9415 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -486,7 +486,6 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- WARN_ON(in_interrupt());
ice_prepare_for_reset(pf);
@@ -1057,7 +1056,9 @@ struct ice_aq_task {
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task *task;
+ unsigned long start;
long ret;
int err;
@@ -1074,6 +1075,8 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
hlist_add_head(&task->entry, &pf->aq_wait_list);
spin_unlock_bh(&pf->aq_wait_lock);
+ start = jiffies;
+
ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
timeout);
switch (task->state) {
@@ -1092,6 +1095,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
break;
}
+ dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
+ jiffies_to_msecs(jiffies - start),
+ jiffies_to_msecs(timeout),
+ opcode);
+
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
@@ -2273,7 +2281,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
return 0;
@@ -2417,7 +2425,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset reset bits
+ * rings; in case of rebuild being triggered not from reset bits
* in pf->state won't be set, so additionally check first q_vector
* against NULL
*/
@@ -2517,13 +2525,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_umems) {
+ if (!ret && prog && vsi->xsk_pools) {
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2549,8 +2557,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_SETUP_XSK_UMEM:
- return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -2873,6 +2881,7 @@ static void ice_set_ops(struct net_device *netdev)
}
netdev->netdev_ops = &ice_netdev_ops;
+ netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
}
@@ -2953,7 +2962,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
- err = ice_devlink_create_port(pf);
+ err = ice_devlink_create_port(vsi);
if (err)
return err;
@@ -2994,7 +3003,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (err)
goto err_free_netdev;
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -3007,7 +3016,7 @@ err_free_netdev:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
err_destroy_devlink_port:
- ice_devlink_destroy_port(pf);
+ ice_devlink_destroy_port(vsi);
return err;
}
@@ -3971,7 +3980,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
- int err;
+ int i, err;
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
@@ -4066,11 +4075,37 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_devlink_init_regions(pf);
+ pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
+ pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
+ pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
+ i = 0;
+ if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_VXLAN];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_VXLAN;
+ i++;
+ }
+ if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_GENEVE];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_GENEVE;
+ i++;
+ }
+
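A connective note on where these table sizes come from (sketched from hunks earlier in this patch):

	/* valid_count[] is filled in ice_init_pkg_hints() when a boost TCAM
	 * entry is found for a tunnel label, so tables[i].n_entries reflects
	 * how many VXLAN/GENEVE ports the loaded DDP package can actually
	 * steer; a package without GENEVE entries simply leaves that table
	 * unregistered.
	 */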
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
GFP_KERNEL);
@@ -4216,7 +4251,6 @@ probe_done:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
- ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -4331,7 +4365,6 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
- ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
@@ -6569,70 +6602,6 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- enum ice_tunnel_type tnl_type;
- u16 port = ntohs(ti->port);
- enum ice_status status;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- tnl_type = TNL_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- tnl_type = TNL_GENEVE;
- break;
- default:
- netdev_err(netdev, "Unknown tunnel type\n");
- return;
- }
-
- status = ice_create_tunnel(&pf->hw, tnl_type, port);
- if (status == ICE_ERR_OUT_OF_RANGE)
- netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
- port);
- else if (status)
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
-}
-
-/**
- * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- enum ice_status status;
- bool retval;
-
- retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
- if (!retval) {
- netdev_info(netdev, "port %d not found in UDP tunnels list\n",
- port);
- return;
- }
-
- status = ice_destroy_tunnel(&pf->hw, port, false);
- if (status)
- netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
- port);
-}
-
-/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -6824,6 +6793,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = ice_udp_tunnel_add,
- .ndo_udp_tunnel_del = ice_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9d0d6b0025cf..eae75260fe20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -145,7 +145,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -375,7 +375,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
@@ -1616,7 +1610,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ice_clean_tx_irq_zc(ring, budget) :
ice_clean_tx_irq(ring, budget);
@@ -1646,7 +1640,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
+ cleaned = ring->xsk_pool ?
ice_clean_rx_irq_zc(ring, budget_per_ring) :
ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
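For context on the prefetch cleanup in this file, a hedged sketch of the helper now used (assumed to match its definition in include/linux/netdevice.h at the time of this series):

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}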
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 51b4df7a59d2..ff1a1cbd078e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -43,7 +43,7 @@
/**
* ice_compute_pad - compute the padding
- * rx_buf_len: buffer length
+ * @rx_buf_len: buffer length
*
* Figure out the size of half page based on given buffer length and
* then subtract the skb_shared_info followed by subtraction of the
@@ -295,7 +295,7 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4cdccfadf274..2226a291a394 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -676,6 +676,9 @@ struct ice_hw {
struct mutex tnl_lock;
struct ice_tunnel_table tnl;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 71497776ac62..ec7f6c64132e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -871,7 +871,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* If there are not enough resources available, return an error. This should
* always be caught by ice_set_per_vf_res().
*
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 20ac5fca68c6..797886524054 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
err = ice_setup_rx_ctx(rx_ring);
@@ -260,21 +260,21 @@ free_buf:
}
/**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
*
* Returns 0 on success, negative on error
*/
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
- if (vsi->xsk_umems)
+ if (vsi->xsk_pools)
return 0;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
+ if (!vsi->xsk_pools) {
+ vsi->num_xsk_pools = 0;
return -ENOMEM;
}
@@ -282,73 +282,73 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
* @vsi: VSI from which the buffer pool will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
*/
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
+ vsi->xsk_pools[qid] = NULL;
+ vsi->num_xsk_pools_used--;
- if (vsi->num_xsk_umems_used == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
+ if (vsi->num_xsk_pools_used == 0) {
+ kfree(vsi->xsk_pools);
+ vsi->xsk_pools = NULL;
+ vsi->num_xsk_pools = 0;
}
}
/**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool
* @vsi: Current VSI
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+ !vsi->xsk_pools[qid])
return -EINVAL;
- xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_umem(vsi, qid);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
+ ice_xsk_remove_pool(vsi, qid);
return 0;
}
/**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool
* @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to a requested buffer pool
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_umems)
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_umems)
+ if (!vsi->num_xsk_pools)
+ vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_pools)
return -EINVAL;
- err = ice_xsk_alloc_umems(vsi);
+ err = ice_xsk_alloc_pools(vsi);
if (err)
return err;
- if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ if (vsi->xsk_pools && vsi->xsk_pools[qid])
return -EBUSY;
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
+ vsi->xsk_pools[qid] = pool;
+ vsi->num_xsk_pools_used++;
- err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -357,17 +357,17 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
}
/**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool depending on its state
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate with a ring, NULL to disable
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
- bool if_running, umem_present = !!umem;
- int ret = 0, umem_failure = 0;
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
@@ -375,26 +375,26 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
- goto xsk_umem_if_up;
+ goto xsk_pool_if_up;
}
}
- umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
- ice_xsk_umem_disable(vsi, qid);
+ pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+ ice_xsk_pool_disable(vsi, qid);
-xsk_umem_if_up:
+xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
- if (!ret && umem_present)
+ if (!ret && pool_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
- if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
- umem_present ? "en" : "dis", umem_failure);
- return umem_failure;
+ if (pool_failure) {
+ netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+ pool_present ? "en" : "dis", pool_failure);
+ return pool_failure;
}
return ret;
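
ice_xsk_pool_setup() is reached from the driver's ndo_bpf handler when userspace binds or unbinds an AF_XDP socket. A minimal sketch of that dispatch; the handler shape here is assumed, but XDP_SETUP_XSK_POOL and the netdev_bpf xsk union are the core API after this series:

static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	switch (xdp->command) {
	case XDP_SETUP_XSK_POOL:
		/* xdp->xsk.pool is NULL when the socket is being unbound */
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
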
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];
do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -595,7 +595,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
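
The set/clear calls above drive the need_wakeup flag that userspace checks before issuing a syscall. A hedged userspace counterpart using libbpf's AF_XDP helpers, mirroring the pattern in the kernel's xdpsock sample:

#include <bpf/xsk.h>
#include <sys/socket.h>

/* after refilling the fill queue, kick the driver only if it asked for it */
static void kick_rx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *fq)
{
	if (xsk_ring_prod__needs_wakeup(fq))
		recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
			 NULL, NULL);
}
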
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}
return budget > 0 && work_done;
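
Taken together, the ice_xsk.c hunks are a mechanical conversion from the UMEM-centric helpers to their pool-centric replacements, all visible in this diff:

/* xsk_umem_consume_tx()        -> xsk_tx_peek_desc()
 * xsk_umem_consume_tx_done()   -> xsk_tx_release()
 * xsk_umem_complete_tx()       -> xsk_tx_completed()
 * xsk_umem_uses_need_wakeup()  -> xsk_uses_need_wakeup()
 * xsk_buff_dma_map()/unmap()   -> xsk_pool_dma_map()/unmap()
 * xsk_buff_dma_sync_for_cpu()  -> unchanged name, now also takes the pool
 */
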
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -814,7 +814,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (queue_id >= vsi->num_txq)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -833,20 +833,20 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
}
/**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have an AF_XDP buffer pool attached
* @vsi: VSI to be checked
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
*/
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_umems)
+ if (!vsi->xsk_pools)
return false;
- for (i = 0; i < vsi->num_xsk_umems; i++) {
- if (vsi->xsk_umems[i])
+ for (i = 0; i < vsi->num_xsk_pools; i++) {
+ if (vsi->xsk_pools[i])
return true;
}
@@ -854,7 +854,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
}
/**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
@@ -872,7 +872,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fc1a06b4df36..fad783690134 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -9,7 +9,8 @@
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
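
The xsk_buff_pool passed to the new prototype is created by the XSK core when userspace binds an AF_XDP socket to a queue. A hedged sketch of that userspace side with libbpf; the interface name, queue id, and ring sizes are illustrative, and umem is assumed to have been set up earlier with xsk_umem__create():

#include <bpf/xsk.h>
#include <linux/if_xdp.h>

static int bind_af_xdp(struct xsk_umem *umem, struct xsk_socket **xsk,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_USE_NEED_WAKEUP,
	};

	/* binding queue 0 of eth0 lands in ice_xsk_pool_setup() via ndo_bpf */
	return xsk_socket__create(xsk, "eth0", 0, umem, rx, tx, &cfg);
}
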
@@ -19,8 +20,8 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
- struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+ struct xsk_buff_pool __always_unused *pool,
u16 __always_unused qid)
{
return -EOPNOTSUPP;