Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c')
 drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 133 ++++++++++++-------
 1 file changed, 91 insertions(+), 42 deletions(-)
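
Summary of the change as it touches this file: rvu_cgx_probe() is renamed to rvu_cgx_init() and gains a matching rvu_cgx_exit(); the contiguous cgx_cnt is replaced by cgx_cnt_max, so sparse CGX port IDs are handled by NULL-checking rvu_cgx_pdata() in every loop; the up-direction mailbox moves into rvu->afpf_wq_info; the mailbox M() macro gains a _fn_name argument so allocator and handler names are lowercase; rvu_cgx_wq_destroy() becomes static; and link-up is started on all mapped CGX ports during init.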
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 188185c15b4a..7d7133c5f799 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,14 +20,14 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
-*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
- &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
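
The only functional change in this hunk is the generated symbol name: the new _fn_name argument feeds the ## token paste, so message allocators get lowercase names (otx2_mbox_alloc_msg_cgx_link_event) matching the renamed rvu_mbox_handler_* functions below, rather than names derived from the uppercase message ID. A minimal, self-contained sketch of the same token-pasting pattern, using a hypothetical message type and an illustrative ID rather than the kernel's mbox definitions:

#include <stdio.h>
#include <stdlib.h>

struct msg_req { int id; };

#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
static struct _req_type *					\
alloc_msg_ ## _fn_name(void)					\
{								\
	struct _req_type *req = calloc(1, sizeof(*req));	\
	if (req)						\
		req->id = _id;					\
	return req;						\
}

/* Expands to alloc_msg_cgx_start_rxtx(); the ID is illustrative only */
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp)
#undef M

int main(void)
{
	struct msg_req *req = alloc_msg_cgx_start_rxtx();

	printf("allocated msg id 0x%x\n", req ? req->id : -1);
	free(req);
	return 0;
}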
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
- if (cgx_id >= rvu->cgx_cnt)
+ if (cgx_id >= rvu->cgx_cnt_max)
return NULL;
return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
- int cgx_cnt = rvu->cgx_cnt;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
int size, free_pkind;
- if (!cgx_cnt)
+ if (!cgx_cnt_max)
return 0;
- if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
- rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
- /* Initialize offset 0 with an invalid cgx and lmac id */
- rvu->pf2cgxlmac_map[0] = 0xFF;
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
rvu->cgx_mapped_pfs = 0;
- for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
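
Two details in this hunk work together: the 0xF bounds check above guarantees a (cgx, lmac) pair fits in the two nibbles of a u8, and since valid IDs top out at 0xE, a packed value can never equal 0xFF, so memset()ing the whole table to 0xFF marks every entry invalid. This replaces the old "offset 0 only" marker now that sparse CGX IDs can leave holes anywhere in the map. A stand-in sketch of the packing that cgxlmac_id_to_bmap() is expected to perform (the helper name here is hypothetical):

/* Pack a (cgx, lmac) pair into one u8; neither nibble can be 0xF for a
 * valid ID, so 0xFF is a safe "invalid entry" marker. */
static inline unsigned char pack_cgx_lmac(unsigned char cgx_id, unsigned char lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}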
@@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
}
/* Send mbox message to PF */
- msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
} while (1);
}
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
struct cgx_event_cb cb;
int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
- return;
+ return -ENOMEM;
}
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
cb.data = rvu;
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
cgx, lmac);
}
}
+
+ return 0;
}
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
}
}
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
{
- int i, err;
+ int cgx, err;
+ void *cgxd;
- /* find available cgx ports */
- rvu->cgx_cnt = cgx_get_cgx_cnt();
- if (!rvu->cgx_cnt) {
+ /* CGX port IDs start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port ID value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
}
- rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
- GFP_KERNEL);
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
if (!rvu->cgx_idmap)
return -ENOMEM;
/* Initialize the cgxdata table */
- for (i = 0; i < rvu->cgx_cnt; i++)
- rvu->cgx_idmap[i] = cgx_get_pdata(i);
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
/* Map CGX LMAC interfaces to RVU PFs */
err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
return err;
/* Register for CGX events */
- cgx_lmac_event_handler_init(rvu);
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+ /* Ensure event handler registration is completed, before
+ * we turn on the links
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregister is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
return 0;
}
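
The loops in rvu_cgx_init() and rvu_cgx_exit() above all follow the same sparse-ID pattern this patch introduces: size the idmap by the maximum port ID, let cgx_get_pdata() leave NULL holes for absent ports, and have every walker skip those holes; the mb() calls bracket these walks so handler registration or unregistration is fully visible before links come up or the workqueue is destroyed. A self-contained toy sketch of that walk (names and data here are illustrative only):

#include <stdio.h>

#define CNT_MAX 4

/* Holes at indices 1 and 3 stand in for unpopulated CGX port IDs */
static void *idmap[CNT_MAX] = { "cgx0", NULL, "cgx2", NULL };

int main(void)
{
	for (int i = 0; i < CNT_MAX; i++) {
		if (!idmap[i])		/* hole: port ID not populated */
			continue;
		printf("port %d -> %s\n", i, (char *)idmap[i]);
	}
	return 0;
}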
@@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
return 0;
}
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
@@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
lmac_id, en);
}
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);