Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h           |   7
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c    |  28
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c        |  50
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c                  | 146
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h                  |   1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c         |   4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c        |   4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c                |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c      |  24
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h      |   1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c |  67
11 files changed, 192 insertions(+), 142 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 5fc46c5a4f36..448d1fafc827 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -265,14 +265,9 @@ struct nicvf_drv_stats {
struct cavium_ptp;
-struct xcast_addr {
- struct list_head list;
- u64 addr;
-};
-
struct xcast_addr_list {
- struct list_head list;
int count;
+ u64 mc[];
};
struct nicvf_work {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 1e9a31fef729..707db3304396 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1929,7 +1929,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
work.work);
struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
union nic_mbx mbx = {};
- struct xcast_addr *xaddr, *next;
+ int idx;
if (!vf_work)
return;
@@ -1956,16 +1956,10 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
/* check if we have any specific MACs to be added to PF DMAC filter */
if (vf_work->mc) {
/* now go through kernel list of MACs and add them one by one */
- list_for_each_entry_safe(xaddr, next,
- &vf_work->mc->list, list) {
+ for (idx = 0; idx < vf_work->mc->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = xaddr->addr;
+ mbx.xcast.data.mac = vf_work->mc->mc[idx];
nicvf_send_msg_to_pf(nic, &mbx);
-
- /* after receiving ACK from PF release memory */
- list_del(&xaddr->list);
- kfree(xaddr);
- vf_work->mc->count--;
}
kfree(vf_work->mc);
}
@@ -1996,17 +1990,15 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
mode |= BGX_XCAST_MCAST_FILTER;
/* here we need to copy mc addrs */
if (netdev_mc_count(netdev)) {
- struct xcast_addr *xaddr;
-
- mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
- INIT_LIST_HEAD(&mc_list->list);
+ mc_list = kmalloc(offsetof(typeof(*mc_list),
+ mc[netdev_mc_count(netdev)]),
+ GFP_ATOMIC);
+ if (unlikely(!mc_list))
+ return;
+ mc_list->count = 0;
netdev_hw_addr_list_for_each(ha, &netdev->mc) {
- xaddr = kmalloc(sizeof(*xaddr),
- GFP_ATOMIC);
- xaddr->addr =
+ mc_list->mc[mc_list->count] =
ether_addr_to_u64(ha->addr);
- list_add_tail(&xaddr->list,
- &mc_list->list);
mc_list->count++;
}
}
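[Editor's note, not part of the patch: the thunder change above replaces one kmalloc'd list node per multicast address with a single flexible-array allocation sized by offsetof(). A minimal sketch of the same technique, assuming n addresses to store; later kernels also provide struct_size() for this size computation:

	/* One allocation covers the header plus n trailing u64 entries. */
	struct addr_list {
		int count;
		u64 addr[];	/* flexible array member; must be last */
	};

	struct addr_list *l = kmalloc(offsetof(struct addr_list, addr[n]),
				      GFP_ATOMIC);
	if (!l)
		return;		/* unlike the old code, the failure is checked */
	l->count = 0;

A single kfree(l) then releases the whole list, which is what lets nicvf_set_rx_mode_task() drop the per-entry list_del()/kfree() loop.]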
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 80ad16acf0f1..ac2c3f6a12bc 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -377,6 +377,38 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
+ uint32_t __iomem * (*get_tbipa)(void __iomem *),
+ void __iomem *reg_map, struct resource *reg_res)
+{
+ struct device_node *np = pdev->dev.of_node;
+ uint32_t __iomem *tbipa;
+ bool tbipa_mapped;
+
+ tbipa = of_iomap(np, 1);
+ if (tbipa) {
+ tbipa_mapped = true;
+ } else {
+ tbipa_mapped = false;
+ tbipa = (*get_tbipa)(reg_map);
+
+ /*
+ * Add consistency check to make sure TBI is contained within
+ * the mapped range (not because we would get a segfault,
+ * rather to catch bugs in computing TBI address). Print error
+ * message but continue anyway.
+ */
+ if ((void *)tbipa > reg_map + resource_size(reg_res) - 4)
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
+ ((void *)tbipa - reg_map) + 4);
+ }
+
+ iowrite32be(be32_to_cpu(tbipa_val), tbipa);
+
+ if (tbipa_mapped)
+ iounmap(tbipa);
+}
+
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
@@ -450,8 +482,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (tbi) {
const u32 *prop = of_get_property(tbi, "reg", NULL);
- uint32_t __iomem *tbipa;
-
if (!prop) {
dev_err(&pdev->dev,
"missing 'reg' property in node %pOF\n",
@@ -459,20 +489,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
err = -EBUSY;
goto error;
}
-
- tbipa = data->get_tbipa(priv->map);
-
- /*
- * Add consistency check to make sure TBI is contained
- * within the mapped range (not because we would get a
- * segfault, rather to catch bugs in computing TBI
- * address). Print error message but continue anyway.
- */
- if ((void *)tbipa > priv->map + resource_size(&res) - 4)
- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
- ((void *)tbipa - priv->map) + 4);
-
- iowrite32be(be32_to_cpup(prop), tbipa);
+ set_tbipa(*prop, pdev,
+ data->get_tbipa, priv->map, &res);
}
}
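[Editor's note: set_tbipa() prefers a dedicated second "reg" region mapped via of_iomap() and only falls back to the driver's get_tbipa callback when that mapping is absent; only the of_iomap() case needs the trailing iounmap(), since the fallback address lives inside the already-mapped reg_map. A hypothetical callback of the shape it expects — the 0x30 offset is an assumption for illustration, not the driver's real layout:

	/* Hypothetical get_tbipa: derive the TBIPA register address from
	 * the start of the mapped MII register block. */
	static uint32_t __iomem *example_get_tbipa(void __iomem *p)
	{
		return (uint32_t __iomem *)(p + 0x30);	/* assumed offset */
	}
]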
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b492af6affc3..aad5658d79d5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -118,6 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int init_crq_queue(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -320,18 +321,16 @@ failure:
dev_info(dev, "replenish pools failure\n");
pool->free_map[pool->next_free] = index;
pool->rx_buff[index].skb = NULL;
- if (!dma_mapping_error(dev, dma_addr))
- dma_unmap_single(dev, dma_addr, pool->buff_size,
- DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
- if (lpar_rc == H_CLOSED) {
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
- * queue is closed. Firmware guarantees that a signal will
- * be sent to the driver, triggering a reset.
+ * queue is closed or pending failover.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset.
*/
deactivate_rx_pools(adapter);
netif_carrier_off(adapter->netdev);
@@ -1071,6 +1070,14 @@ static int ibmvnic_open(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
if (adapter->state != VNIC_CLOSED) {
@@ -1218,7 +1225,6 @@ static int __ibmvnic_close(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
if (rc)
return rc;
- ibmvnic_cleanup(netdev);
adapter->state = VNIC_CLOSED;
return 0;
}
@@ -1228,8 +1234,17 @@ static int ibmvnic_close(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ /* If device failover is pending, just set device state and return.
+ * Device operation will be handled by reset routine.
+ */
+ if (adapter->failover_pending) {
+ adapter->state = VNIC_CLOSED;
+ return 0;
+ }
+
mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
+ ibmvnic_cleanup(netdev);
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1562,8 +1577,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED) {
- /* Disable TX and report carrier off if queue is closed.
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+ /* Disable TX and report carrier off if queue is closed
+ * or pending failover.
* Firmware guarantees that a signal will be sent to the
* driver, triggering a reset or some other action.
*/
@@ -1711,14 +1727,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- return 0;
- ibmvnic_cleanup(netdev);
- } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
- ibmvnic_cleanup(netdev);
- } else {
+ ibmvnic_cleanup(netdev);
+
+ if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_FAILOVER) {
rc = __ibmvnic_close(netdev);
if (rc)
return rc;
@@ -1737,6 +1749,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ if (adapter->wait_for_reset) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ release_sub_crqs(adapter, 1);
+ } else {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ }
+
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Couldn't initialize crq. rc=%d\n", rc);
+ return rc;
+ }
+
rc = ibmvnic_init(adapter);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1878,23 +1907,26 @@ static void __ibmvnic_reset(struct work_struct *work)
mutex_unlock(&adapter->reset_lock);
}
-static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
- enum ibmvnic_reset_reason reason)
+static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ enum ibmvnic_reset_reason reason)
{
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
struct list_head *entry;
+ int ret;
if (adapter->state == VNIC_REMOVING ||
- adapter->state == VNIC_REMOVED) {
- netdev_dbg(netdev, "Adapter removing, skipping reset\n");
- return;
+ adapter->state == VNIC_REMOVED ||
+ adapter->failover_pending) {
+ ret = EBUSY;
+ netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+ goto err;
}
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = EAGAIN;
- return;
+ ret = adapter->init_done_rc = EAGAIN;
+ goto err;
}
mutex_lock(&adapter->rwi_lock);
@@ -1904,7 +1936,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
mutex_unlock(&adapter->rwi_lock);
- return;
+ ret = EBUSY;
+ goto err;
}
}
@@ -1912,7 +1945,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (!rwi) {
mutex_unlock(&adapter->rwi_lock);
ibmvnic_close(netdev);
- return;
+ ret = ENOMEM;
+ goto err;
}
rwi->reset_reason = reason;
@@ -1921,6 +1955,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
+
+ return 0;
+err:
+ if (adapter->wait_for_reset)
+ adapter->wait_for_reset = false;
+ return -ret;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
@@ -2055,6 +2095,8 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
+ int rc, ret;
+
adapter->fallback.mtu = adapter->req_mtu;
adapter->fallback.rx_queues = adapter->req_rx_queues;
adapter->fallback.tx_queues = adapter->req_tx_queues;
@@ -2062,11 +2104,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return rc;
wait_for_completion(&adapter->reset_done);
+ ret = 0;
if (adapter->reset_done_rc) {
+ ret = -EIO;
adapter->desired.mtu = adapter->fallback.mtu;
adapter->desired.rx_queues = adapter->fallback.rx_queues;
adapter->desired.tx_queues = adapter->fallback.tx_queues;
@@ -2074,12 +2120,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->desired.tx_entries = adapter->fallback.tx_entries;
init_completion(&adapter->reset_done);
- ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ adapter->wait_for_reset = true;
+ rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ if (rc)
+ return ret;
wait_for_completion(&adapter->reset_done);
}
adapter->wait_for_reset = false;
- return adapter->reset_done_rc;
+ return ret;
}
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
@@ -2364,6 +2413,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ atomic_set(&scrq->used, 0);
scrq->cur = 0;
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
@@ -2574,7 +2624,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
- u8 first;
+ u8 *first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -2605,11 +2655,12 @@ restart_loop:
txbuff->data_dma[j] = 0;
}
/* if sub_crq was sent indirectly */
- first = txbuff->indir_arr[0].generic.first;
- if (first == IBMVNIC_CRQ_CMD) {
+ first = &txbuff->indir_arr[0].generic.first;
+ if (*first == IBMVNIC_CRQ_CMD) {
dma_unmap_single(dev, txbuff->indir_dma,
sizeof(txbuff->indir_arr),
DMA_TO_DEVICE);
+ *first = 0;
}
if (txbuff->last_frag) {
@@ -3882,9 +3933,9 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
@@ -4144,7 +4195,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
+ adapter->failover_pending = false;
complete(&adapter->init_done);
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -4161,7 +4214,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ adapter->failover_pending = true;
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -4461,19 +4514,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
u64 old_num_rx_queues, old_num_tx_queues;
int rc;
- if (adapter->resetting && !adapter->wait_for_reset) {
- rc = ibmvnic_reset_crq(adapter);
- if (!rc)
- rc = vio_enable_interrupts(adapter->vdev);
- } else {
- rc = init_crq_queue(adapter);
- }
-
- if (rc) {
- dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
adapter->from_passive_init = false;
old_num_rx_queues = adapter->req_rx_queues;
@@ -4498,7 +4538,8 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset) {
+ if (adapter->resetting && !adapter->wait_for_reset &&
+ adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
release_sub_crqs(adapter, 0);
@@ -4586,6 +4627,13 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->mac_change_pending = false;
do {
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
+ rc);
+ goto ibmvnic_init_fail;
+ }
+
rc = ibmvnic_init(adapter);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
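[Editor's note: wait_for_reset() above turns the asynchronous reset worker into a synchronous call via the standard completion pattern; the patch's addition is checking ibmvnic_reset()'s new return code before blocking, so a rejected reset (EBUSY during pending failover, ENOMEM) can no longer leave the caller waiting forever. A simplified sketch with illustrative names, not the driver's code:

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct example_adapter {
		struct work_struct reset_work;	/* the reset handler */
		struct completion reset_done;
		int reset_done_rc;
	};

	static int example_wait_for_reset(struct example_adapter *a)
	{
		init_completion(&a->reset_done);
		schedule_work(&a->reset_work);		/* what ibmvnic_reset() does */
		wait_for_completion(&a->reset_done);	/* worker calls complete() */
		return a->reset_done_rc ? -EIO : 0;
	}
]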
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 89efe700eafe..99c0b58c2c39 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1108,6 +1108,7 @@ struct ibmvnic_adapter {
bool napi_enabled, from_passive_init;
bool mac_change_pending;
+ bool failover_pending;
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 385f5d425d19..21977ec984c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -468,8 +468,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
- if (!mac_buf)
+ if (!mac_buf) {
+ status = ICE_ERR_NO_MEMORY;
goto err_unroll_fltr_mgmt_struct;
+ }
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
devm_kfree(ice_hw_to_dev(hw), mac_buf);
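[Editor's note: the two added lines matter more than they look. ice_init_hw() reaches this allocation with status still 0 from the preceding successful calls, so jumping to the unroll label without assigning an error would unwind and still report success. The general form of the fix, as a sketch with illustrative names:

	buf = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;	/* set the error BEFORE the jump */
		goto err_unroll;		/* unwind partial initialization */
	}
]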
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 186764a5c263..1db304c01d10 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,7 +156,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return ARRAY_SIZE(ice_regs_dump_list);
+ return sizeof(ice_regs_dump_list);
}
static void
@@ -170,7 +170,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
regs->version = 1;
- for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list) / sizeof(u32); ++i)
+ for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}
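[Editor's note: both hunks hinge on the bytes-versus-elements distinction. get_regs_len() must return a byte count (the ethtool core allocates a buffer that large), while the copy loop iterates array elements. A minimal illustration, using ARRAY_SIZE() from linux/kernel.h:

	static const u32 regs_list[] = { 0x0, 0x4, 0x8 };

	size_t bytes = sizeof(regs_list);	/* 12: what get_regs_len() reports */
	size_t elems = ARRAY_SIZE(regs_list);	/* 3: what the dump loop iterates */

The old code had both backwards: it returned an element count from get_regs_len() and divided an element count by sizeof(u32) in the loop, so both the buffer and the dump were a quarter of the intended size.]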
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 7fc1bbf51c44..54a038943c06 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1604,7 +1604,7 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
{
int i;
- if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
memset(pe, 0, sizeof(*pe));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 53fffd09d133..ca38a30fbe91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3805,18 +3805,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
},
};
-static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
-}
-
-static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
-};
-
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -3877,8 +3865,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
kvd_size, MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params,
- NULL);
+ &kvd_size_params);
if (err)
return err;
@@ -3887,8 +3874,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
- &linear_size_params,
- &mlxsw_sp_resource_kvd_linear_ops);
+ &linear_size_params);
if (err)
return err;
@@ -3905,8 +3891,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params,
- NULL);
+ &hash_double_size_params);
if (err)
return err;
@@ -3915,8 +3900,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params,
- NULL);
+ &hash_single_size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 82820ba43728..804d4d2c8031 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -442,7 +442,6 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 8796db44dcc3..fe4327f547d2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -315,8 +315,9 @@ static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
return occ;
}
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_kvdl_occ_get(void *priv)
{
+ const struct mlxsw_sp *mlxsw_sp = priv;
u64 occ = 0;
int i;
@@ -326,48 +327,33 @@ u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
return occ;
}
-static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
-static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
- .occ_get = mlxsw_sp_kvdl_single_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
- .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
- .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
-};
-
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
@@ -386,8 +372,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_single_ops);
+ &size_params);
if (err)
return err;
@@ -398,8 +383,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_ops);
+ &size_params);
if (err)
return err;
@@ -410,13 +394,13 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params,
- &mlxsw_sp_kvdl_chunks_large_ops);
+ &size_params);
return err;
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl;
int err;
@@ -429,6 +413,23 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_kvdl_parts_init;
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp_kvdl_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp_kvdl_single_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp_kvdl_chunks_occ_get,
+ mlxsw_sp);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp_kvdl_large_chunks_occ_get,
+ mlxsw_sp);
+
return 0;
err_kvdl_parts_init:
@@ -438,6 +439,16 @@ err_kvdl_parts_init:
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
kfree(mlxsw_sp->kvdl);
}
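[Editor's note on the API shape this patch adopts, as a sketch with illustrative names: the devlink occupancy getter now receives the priv pointer supplied at registration time, so it no longer digs through devlink_priv()/mlxsw_core_driver_priv(), and each devlink_resource_occ_get_register() is paired with an unregister in the fini path, in reverse order:

	static u64 example_occ_get(void *priv)
	{
		const struct mlxsw_sp *mlxsw_sp = priv;	/* handed back by devlink */
		/* ... sum occupancy across the resource's parts ... */
		return 0;	/* placeholder for the computed occupancy */
	}

	/* at init time: */
	devlink_resource_occ_get_register(devlink, EXAMPLE_RESOURCE_ID,
					  example_occ_get, mlxsw_sp);
	/* at teardown: */
	devlink_resource_occ_get_unregister(devlink, EXAMPLE_RESOURCE_ID);
]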