Diffstat:
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 40
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 163
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 619
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 85
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 323
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 299
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 4
-rw-r--r--  drivers/net/wan/pci200syn.c | 51
-rw-r--r--  drivers/net/wan/z85230.c | 993
-rw-r--r--  net/sched/cls_flower.c | 3
18 files changed, 1994 insertions(+), 646 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index aee6a6f31b0d..7d7dfa8d8a3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -260,7 +260,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
-M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -615,6 +619,9 @@ enum nix_af_status {
NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
};
/* For NIX RX vtag action */
@@ -683,6 +690,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
union {
struct nix_cn10k_rq_ctx_s rq_mask;
@@ -690,6 +698,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -701,6 +710,7 @@ struct nix_cn10k_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -716,6 +726,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -723,6 +734,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -734,6 +746,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
};
@@ -975,6 +988,31 @@ struct nix_hw_info {
u16 min_mtu;
};
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+	/* There is no need to allocate more than 1 bandwidth profile
+ * per RQ of a PF_FUNC's NIXLF. So limit the maximum
+ * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index f11a02d6b6ef..0b092949d7ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -184,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c88dab7747ef..9e5d9ba6f01e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -296,6 +296,13 @@ struct nix_txvlan {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
int blkaddr;
struct rvu *rvu;
@@ -305,6 +312,7 @@ struct nix_hw {
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
};
/* RVU block's capabilities or functionality,
@@ -322,6 +330,7 @@ struct hw_cap {
bool nix_rx_multicast; /* Rx packet replication support */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -587,6 +596,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
@@ -672,6 +682,12 @@ int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 7103f8216ad1..3cc3c6fd1d84 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
struct nix_hw *nix_hw;
@@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
}
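
For reference, the two files above are created under the existing RVU NIX debugfs directory; assuming the usual debugfs mount point and the AF driver's debugfs root (the directory name depends on the platform), the resource view can be read back roughly like this (illustrative output reconstructed from the seq_printf format strings above, counts are placeholders):

    # cat /sys/kernel/debug/<rvu-af-root>/nix/ingress_policer_rsrc

    Bandwidth profile resource free count
    =====================================
    Leaf :: Max:  <N> Free:  <N>
    Mid  :: Max:  <N> Free:  <N>
    Top  :: Max:  <N> Free:  <N>
    =====================================
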
static void rvu_dbg_npa_init(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8cb665b7d8a..d6f8210652c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -23,6 +23,14 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -699,8 +707,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
@@ -740,6 +751,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -796,6 +812,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -808,6 +827,8 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -885,6 +906,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -3624,6 +3648,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
@@ -3772,6 +3800,8 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
kfree(txsch->schq.bmap);
}
+ nix_ipolicer_freemem(nix_hw);
+
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
@@ -3879,6 +3909,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
}
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3987,3 +4019,586 @@ void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
if (from_vf)
ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+	/* Extract PCP and DEI fields from the outer VLAN, at byte offset
+	 * 2 from the start of LB_PTR (i.e. the TAG).
+ * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
+ * fields are considered when 'Tunnel enable' is set in profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
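
Both the INIT above and the verify/debugfs paths address a bandwidth-profile context by packing the layer into bits [15:14] of the AQ queue index and the per-layer profile number into bits [13:0]. A minimal pair of helpers capturing that convention (an illustrative sketch, not part of the patch) would be:

    /* Illustrative only: encode/decode of the AQ qidx used with
     * NIX_AQ_CTYPE_BANDPROF (layer in bits [15:14], profile index
     * in bits [13:0]), mirroring the open-coded math in the patch.
     */
    static inline u16 bandprof_qidx(int layer, int prof_idx)
    {
    	return (prof_idx & 0x3FFF) | (layer << 14);
    }

    static inline void bandprof_qidx_decode(u16 qidx, int *layer, int *prof_idx)
    {
    	*layer = (qidx >> 14) & 0x03;
    	*prof_idx = qidx & 0x3FFF;
    }
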
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0XFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+			/* There is no enable bit in the profile context,
+			 * so there is nothing to disable. INIT the contexts
+			 * here so that PF/VF later only need to do a WRITE
+			 * to set up policer rates and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts for MID level
+ * profiles, this will be needed for leaf layer profiles'
+ * aggregation.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+ /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check if the profile is allocated to the requesting PCIFUNC or not
+ * with the exception of AF. AF is allowed to read and update contexts.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to higher layer profile then check
+ * if that profile is also allocated to the requesting PCIFUNC
+ * or not.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get Mid layer prof index and map leaf_prof index
+ * also such that flows that are being steered
+ * to different RQs and marked with same match_id
+		 * are rate limited in an aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
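
A concrete walkthrough of the aggregation above, under the assumption of two flower rules carrying the same mark: rule A steers its traffic to RQ0 and rule B to RQ1, each RQ has policing enabled with its own leaf profile, and both rules carry match_id 0x10. When rule B is installed, the scan finds rule A's leaf profile with the same match_id; since neither leaf has hl_en set yet, a mid-layer profile is allocated, initialized from rule A's leaf context (with hl_en cleared), and both leaves are then linked to it via hl_en = 1 and band_prof_id = mid, leaving ref_count[mid] at 2. Traffic of both flows is therefore metered by one shared mid-layer bucket, i.e. rate limited in aggregate.
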
+
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 6ba6a835e2fa..87d7c6ab047f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1110,6 +1110,11 @@ find_rule:
req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index ce365ae80352..76837d5e19c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -171,6 +171,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -181,6 +182,7 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
@@ -212,7 +214,9 @@
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
@@ -223,6 +227,10 @@
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
#define NIX_AF_RX_DEF_IPSECX (0x02B0)
#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5e5f45c7eab0..14aa8e37ea41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -286,7 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
- NIX_AQ_CTYPE_BAND_PROF = 0x6,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -665,6 +665,89 @@ struct nix_rx_mce_s {
uint64_t next : 16;
};
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
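
The W0-W15 comments imply the context spans sixteen 64-bit words (128 bytes). A compile-time check along these lines (illustrative, not part of the patch) would catch accidental bit-field drift:

    /* Illustrative: the NIX bandwidth profile context is 16 x 64-bit words. */
    static_assert(sizeof(struct nix_bandprof_s) == 128,
    	      "nix_bandprof_s must be exactly 128 bytes");
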
+
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 9ec0313f13fc..1b08896b46d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -179,3 +179,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
sq->head++;
sq->head &= (sq->sqe_cnt - 1);
}
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
+	 * This is the upper limit on the number of tokens (bytes) that
+ * can be accumulated in the bucket.
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+ /* Calculate mantissa
+ * Find remaining bytes 'burst - 2^burst_exp'
+ * mantissa = (remaining bytes) / 2^ (burst_exp - 8)
+ */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
+
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out mantissa, exponent and divider from given max pkt rate
+ *
+ * To achieve desired rate HW adds
+ * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
+ * policer timeunit * 2^rdiv ie 2 * 2^rdiv usecs, to the token bucket.
+ * Here policer timeunit is 2 usecs and rate is in bits per sec.
+ * Since floating point cannot be used below algorithm uses 1000000
+ * scale factor to support rates upto 100Gbps.
+ */
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
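
To make the encoding concrete: the hardware adds (1 + mantissa/256) * 2^exp bytes to the bucket every 2 * 2^rdiv microseconds, so the configured rate in bits per second works back out to (256 + mantissa) * 2^exp * 15625 / 2^rdiv. A rough self-check helper (a sketch, not part of the driver) and one worked case:

    /* Illustrative only: reconstruct the configured rate (bps) from the
     * exponent/mantissa/divider produced by cn10k_get_ingress_rate_cfg().
     * (1 + mantissa/256) * 2^exp bytes every 2 * 2^rdiv usecs, in bits/sec.
     */
    static u64 ipolicer_cfg_to_bps(u32 rate_exp, u32 rate_mantissa, u32 rdiv)
    {
    	return (((u64)(256 + rate_mantissa) << rate_exp) * 15625) >> rdiv;
    }

For a requested rate of 1 Gbps the loop above yields exp = 7, mantissa = 244, rdiv = 0, and the helper maps that back to (256 + 244) * 128 * 15625 = 1,000,000,000 bps exactly.
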
+
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+ /* The amount of decremented tokens is calculated according to
+ * the following equation:
+ * max([ LMODE ? 0 : (packet_length - LXPTR)] +
+ * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+ * 1/256)
+ * if LMODE is 1 then rate limiting will be based on
+ * PPS otherwise bps.
+ * The aim of the ADJUST value is to specify a token cost per
+ * packet in contrary to the packet length that specifies a
+ * cost per byte. To rate limit based on PPS adjust mantissa
+ * is set as 384 and exponent as 1 so that number of tokens
+		 * decremented becomes 1, i.e., 1 token per packet.
+ */
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+	/* Setting the exponent to 24 and the mantissa to 0 configures
+	 * the bucket with zero values, making the bucket unused. Peak
+ * information rate and Excess information rate buckets are
+ * unused here.
+ */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
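
As a quick check of the PPS comment above: with ADJUST_MANTISSA = 384 and ADJUST_EXPONENT = 1 the adjustment term is (384/256 - 1) * 2^1 = 0.5 * 2 = 1, and with LMODE = 1 the packet-length term is dropped, so exactly one token is charged per packet regardless of its size; the CIR/CBS values computed above then express packets per second rather than bytes per second.
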
+
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e0bc595cbb78..71292a4cf1f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -14,4 +14,15 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e5616d466236..234b330f3183 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -180,6 +180,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
/* HW settings, coalescing etc */
u16 rx_chan_base;
@@ -327,6 +328,7 @@ struct otx2_nic {
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
u64 flags;
struct otx2_qset qset;
@@ -370,6 +372,7 @@ struct otx2_nic {
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
+ unsigned long rq_bmap;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 9d9a2e438acf..8df748e0677b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -286,6 +286,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 65f505b07b5d..59912f73417b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1461,6 +1461,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 26712c091c63..905fc02a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -15,6 +15,7 @@
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>
+#include "cn10k.h"
#include "otx2_common.h"
/* Egress rate limiting definitions */
@@ -41,11 +42,14 @@ struct otx2_tc_flow_stats {
struct otx2_tc_flow {
struct rhash_head node;
unsigned long cookie;
- u16 entry;
unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
};
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
@@ -220,17 +224,76 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
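
A flower rule exercising this path could be installed with an iproute2 invocation along the following lines (illustrative only; the interface name, match and rates are placeholders, the exact police/skbedit syntax may vary by iproute2 version, and ingress policing requires CN10K silicon as checked below):

    tc qdisc add dev eth0 ingress
    tc filter add dev eth0 ingress protocol ip flower skip_sw \
            dst_ip 192.168.1.10 \
            action police rate 100mbit burst 32k conform-exceed drop/pipe \
            action skbedit mark 0x10
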
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
- struct npc_install_flow_req *req)
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+	bool pps = false;
+ u64 rate;
int i;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(nic->netdev, "no tc actions specified");
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
return -EINVAL;
}
@@ -247,8 +310,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
priv = netdev_priv(target);
/* npc_install_flow_req doesn't support passing a target pcifunc */
if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- netdev_info(nic->netdev,
- "can't redirect to other pf/vf\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
return -EOPNOTSUPP;
}
req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -259,18 +322,55 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The algorithm used to calculate rate
+ * mantissa, exponent values for a given token
+ * rate (token can be byte or packet) requires
+				 * token rate to be multiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
default:
return -EOPNOTSUPP;
}
}
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
return 0;
}
-static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_msg *flow_spec = &req->packet;
struct flow_msg *flow_mask = &req->mask;
struct flow_dissector *dissector;
@@ -335,7 +435,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
- netdev_err(nic->netdev, "src mac match not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
return -EOPNOTSUPP;
}
@@ -353,11 +453,11 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_ip(rule, &match);
if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
match.mask->tos) {
- netdev_err(nic->netdev, "tos not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
return -EOPNOTSUPP;
}
if (match.mask->ttl) {
- netdev_err(nic->netdev, "ttl not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
return -EOPNOTSUPP;
}
flow_spec->tos = match.key->tos;
@@ -413,8 +513,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
if (ipv6_addr_loopback(&match.key->dst) ||
ipv6_addr_loopback(&match.key->src)) {
- netdev_err(nic->netdev,
- "Flow matching on IPv6 loopback addr is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
return -EOPNOTSUPP;
}
@@ -463,7 +563,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
- return otx2_tc_parse_actions(nic, &rule->action, req);
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
@@ -498,6 +598,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
+ int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -508,6 +609,27 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
otx2_del_mcam_flow_entry(nic, flow_node->entry);
WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
@@ -524,14 +646,21 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
- struct npc_install_flow_req *req;
- int rc;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Not enough MCAM space to add the flow");
+ return -ENOMEM;
+ }
+
	/* allocate memory for the new flow and its node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -539,17 +668,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
- mutex_lock(&nic->mbox.lock);
- req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
- if (!req) {
- mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
- }
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
- rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
if (rc) {
- otx2_mbox_reset(&nic->mbox.mbox, 0);
- mutex_unlock(&nic->mbox.lock);
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -560,13 +683,17 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
- if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
- netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
- otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto free_leaf;
}
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
nic->flow_cfg->tc_max_flows);
req->channel = nic->hw.rx_chan_base;
@@ -579,9 +706,10 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
/* Send message to AF */
rc = otx2_sync_mbox_msg(&nic->mbox);
if (rc) {
- netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- goto out;
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
}
mutex_unlock(&nic->mbox.lock);
@@ -591,12 +719,35 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
otx2_del_mcam_flow_entry(nic, req->entry);
kfree_rcu(new_node, rcu);
- goto out;
+ goto free_leaf;
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries++;
-out:
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
return rc;
}
@@ -675,6 +826,87 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
}
}
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one ingress MATCHALL ratelimitter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+ /* Convert to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@@ -686,6 +918,8 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
default:
break;
}
@@ -775,6 +1009,9 @@ int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+	/* Exclude receive queue 0 from being used for police action */
+ set_bit(0, &nic->rq_bmap);
+
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
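
For orientation, a minimal user-space sketch (hypothetical struct and field names loosely mirroring flow_action_entry, not the kernel's own types) of the rate/burst selection that the FLOW_ACTION_POLICE branch and the matchall install path above perform: byte-mode policers take rate_bytes_ps, packet-mode policers take rate_pkt_ps, and either way the token rate is multiplied by 8 before it is handed to the mantissa/exponent conversion.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the tc police parameters. */
struct police_params {
	uint64_t rate_bytes_ps;	/* bytes per second, 0 if packet mode */
	uint64_t rate_pkt_ps;	/* packets per second, 0 if byte mode */
	uint32_t burst;		/* burst in bytes */
	uint32_t burst_pkt;	/* burst in packets */
};

/* Pick the token rate and burst the way the parse-actions hunk does:
 * the mantissa/exponent helper expects the token rate pre-multiplied by 8. */
static int pick_policer_rate(const struct police_params *p,
			     uint64_t *rate, uint32_t *burst, bool *pps)
{
	if (p->rate_bytes_ps > 0) {
		*rate = p->rate_bytes_ps * 8;	/* bytes/s -> bits/s */
		*burst = p->burst;
		*pps = false;
		return 0;
	}
	if (p->rate_pkt_ps > 0) {
		*rate = p->rate_pkt_ps * 8;	/* packets/s, scaled by 8 */
		*burst = p->burst_pkt;
		*pps = true;
		return 0;
	}
	return -1;				/* neither mode requested */
}

int main(void)
{
	struct police_params p = { .rate_bytes_ps = 12500000, .burst = 32768 };
	uint64_t rate;
	uint32_t burst;
	bool pps;

	if (!pick_policer_rate(&p, &rate, &burst, &pps))
		printf("rate=%llu burst=%u pps=%d\n",
		       (unsigned long long)rate, burst, pps);
	return 0;
}
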
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6655cb8e24cf..e735134e8487 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
+ return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
- return 0;
+ return NET_SKB_PAD + NET_IP_ALIGN;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
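
A tiny sketch of the updated stmmac_rx_offset() behaviour, with assumed constant values (the real NET_SKB_PAD and NET_IP_ALIGN are architecture and config dependent): NET_IP_ALIGN is now reserved on top of either the XDP headroom or the normal skb padding so the IP header of a received frame ends up aligned.

#include <stdbool.h>
#include <stdio.h>

/* Typical values only; XDP_PACKET_HEADROOM is 256 in mainline, the other
 * two vary by architecture and kernel configuration. */
#define XDP_PACKET_HEADROOM	256
#define NET_SKB_PAD		32
#define NET_IP_ALIGN		2

static unsigned int rx_offset(bool xdp_enabled)
{
	if (xdp_enabled)
		return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	return NET_SKB_PAD + NET_IP_ALIGN;
}

int main(void)
{
	printf("xdp: %u, non-xdp: %u\n", rx_offset(true), rx_offset(false));
	return 0;
}
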
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ba5cc0c53833..dee9c4e15eca 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -42,8 +42,7 @@
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
-/*
- * PLX PCI9052 local configuration and shared runtime registers.
+/* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
typedef struct {
@@ -56,9 +55,7 @@ typedef struct {
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
-}plx9052;
-
-
+} plx9052;
typedef struct port_s {
struct napi_struct napi;
@@ -74,9 +71,7 @@ typedef struct port_s {
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
u8 __iomem *rambase; /* buffer memory base (virtual) */
@@ -88,15 +83,15 @@ typedef struct card_s {
u8 irq; /* interrupt request level */
port_t ports[2];
-}card_t;
-
+} card_t;
-#define get_port(card, port) (&card->ports[port])
+#define get_port(card, port) (&(card)->ports[port])
#define sca_flush(card) (sca_in(IER0, card))
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
int len;
+
do {
len = length > 256 ? 256 : length;
memcpy_toio(dest, src, len);
@@ -112,7 +107,6 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
#include "hd64572.c"
-
static void pci200_set_iface(port_t *port)
{
card_t *card = port->card;
@@ -122,7 +116,7 @@ static void pci200_set_iface(port_t *port)
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
@@ -151,13 +145,11 @@ static void pci200_set_iface(port_t *port)
sca_set_port(port);
}
-
-
static int pci200_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
-
int result = hdlc_open(dev);
+
if (result)
return result;
@@ -167,8 +159,6 @@ static int pci200_open(struct net_device *dev)
return 0;
}
-
-
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
@@ -177,8 +167,6 @@ static int pci200_close(struct net_device *dev)
return 0;
}
-
-
static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -195,7 +183,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
@@ -233,8 +221,6 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
int i;
@@ -292,7 +278,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -314,18 +300,16 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return -EFAULT;
}
- plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+ plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
- scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+ scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
- ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
+ ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
- if (card->plxbase == NULL ||
- card->scabase == NULL ||
- card->rambase == NULL) {
+ if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
@@ -380,6 +364,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
+
port->chan = i;
spin_lock_init(&port->lock);
@@ -407,15 +392,12 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return 0;
}
-
-
static const struct pci_device_id pci200_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
-
static struct pci_driver pci200_pci_driver = {
.name = "PCI200SYN",
.id_table = pci200_pci_tbl,
@@ -423,7 +405,6 @@ static struct pci_driver pci200_pci_driver = {
.remove = pci200_pci_remove_one,
};
-
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
@@ -433,8 +414,6 @@ static int __init pci200_init_module(void)
return pci_register_driver(&pci200_pci_driver);
}
-
-
static void __exit pci200_cleanup_module(void)
{
pci_unregister_driver(&pci200_pci_driver);
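
The get_port() change above is a macro-hygiene fix; a small sketch (toy structs, not the driver's) of why the added parentheses around the card argument matter once a caller passes an expression rather than a plain identifier:

#include <stdio.h>

struct port { int chan; };
struct card { struct port ports[2]; };

/* Old form: the argument is pasted in unparenthesised. */
#define get_port_old(card, port)	(&card->ports[port])
/* New form from the patch: parenthesise the macro argument. */
#define get_port_new(card, port)	(&(card)->ports[port])

int main(void)
{
	struct card cards[2] = { 0 };
	struct card *base = cards;
	struct port *p;

	cards[1].ports[0].chan = 42;

	/* With an expression argument the old macro would expand to
	 * &base + 1->ports[0], which does not even compile; the
	 * parenthesised form does what the caller expects. */
	p = get_port_new(base + 1, 0);
	printf("chan %d\n", p->chan);
	return 0;
}
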
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 002b8c99ab5b..982a03488a00 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *
- * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
* (c) Copyright 2000, 2001 Red Hat Inc
*
* Development of this driver was funded by Equiinet Ltd
@@ -12,7 +10,7 @@
* Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
* unification of all the Z85x30 asynchronous drivers for real.
*
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
+ * DMA now uses get_free_page as kmalloc buffers may span a 64K
* boundary.
*
* Modified for SMP safety and SMP locking by Alan Cox
@@ -55,14 +53,13 @@
#include "z85230.h"
-
/**
* z8530_read_port - Architecture specific interface function
* @p: port to read
*
* Provided port access methods. The Comtrol SV11 requires no delays
* between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
+ *
* In the longer term this should become an architecture specific
* section so that this can become a generic driver interface for all
* platforms. For now we only handle PC I/O ports with or without the
@@ -74,8 +71,9 @@
static inline int z8530_read_port(unsigned long p)
{
- u8 r=inb(Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
+ u8 r = inb(Z8530_PORT_OF(p));
+
+ if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
udelay(5);
return r;
}
@@ -95,34 +93,30 @@ static inline int z8530_read_port(unsigned long p)
* dread 5uS sanity delay.
*/
-
static inline void z8530_write_port(unsigned long p, u8 d)
{
- outb(d,Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP)
+ outb(d, Z8530_PORT_OF(p));
+ if (p & Z8530_PORT_SLEEP)
udelay(5);
}
-
-
static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);
-
/**
- * read_zsreg - Read a register from a Z85230
+ * read_zsreg - Read a register from a Z85230
* @c: Z8530 channel to read from (2 per chip)
* @reg: Register to read
* FIXME: Use a spinlock.
- *
+ *
* Most of the Z8530 registers are indexed off the control registers.
* A read is done by writing to the control register and reading the
* register back. The caller must hold the lock
*/
-
+
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
return z8530_read_port(c->ctrlio);
}
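
read_zsreg()/write_zsreg() follow the SCC indexed-register convention: write the register number to the control port, then the next access through the same port hits that register. A toy user-space model of the pointer protocol (no real I/O; WR0 command bits and the prime R7' registers are not modelled):

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[16];
static int pending = -1;		/* register selected by the last write */

static void ctrl_write(uint8_t val)
{
	if (pending < 0) {
		pending = val & 0x0F;	/* index write */
	} else {
		regs[pending] = val;	/* data write */
		pending = -1;
	}
}

static uint8_t ctrl_read(void)
{
	int r = (pending < 0) ? 0 : pending;

	pending = -1;			/* a read also resets the pointer */
	return regs[r];
}

/* Same shape as read_zsreg()/write_zsreg() above: register 0 needs no
 * index write, everything else does. */
static uint8_t toy_read_zsreg(uint8_t reg)
{
	if (reg)
		ctrl_write(reg);
	return ctrl_read();
}

static void toy_write_zsreg(uint8_t reg, uint8_t val)
{
	if (reg)
		ctrl_write(reg);
	ctrl_write(val);
}

int main(void)
{
	toy_write_zsreg(12, 0xAA);
	printf("R12 = 0x%02X\n", toy_read_zsreg(12));	/* R12 = 0xAA */
	return 0;
}
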
@@ -138,7 +132,8 @@ static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
static inline u8 read_zsdata(struct z8530_channel *c)
{
u8 r;
- r=z8530_read_port(c->dataio);
+
+ r = z8530_read_port(c->dataio);
return r;
}
@@ -156,10 +151,9 @@ static inline u8 read_zsdata(struct z8530_channel *c)
*/
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
z8530_write_port(c->ctrlio, val);
-
}
/**
@@ -182,108 +176,94 @@ static inline void write_zsctrl(struct z8530_channel *c, u8 val)
*
* Write directly to the data register on the Z8530
*/
-
-
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
z8530_write_port(c->dataio, val);
}
-/*
- * Register loading parameters for a dead port
+/* Register loading parameters for a dead port
*/
-
-u8 z8530_dead_port[]=
-{
+
+u8 z8530_dead_port[] = {
255
};
-
EXPORT_SYMBOL(z8530_dead_port);
-/*
- * Register loading parameters for currently supported circuit types
+/* Register loading parameters for currently supported circuit types
*/
-
-/*
- * Data clocked by telco end. This is the correct data for the UK
+/* Data clocked by telco end. This is the correct data for the UK
* "kilostream" service, and most other similar services.
*/
-
-u8 z8530_hdlc_kilostream[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
+ 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream);
-/*
- * As above but for enhanced chips.
+/* As above but for enhanced chips.
*/
-
-u8 z8530_hdlc_kilostream_85230[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream_85230[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
+ 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
23, 3, /* Extended mode AUTO TX and EOM*/
-
+
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
/**
* z8530_flush_fifo - Flush on chip RX FIFO
* @c: Channel to flush
*
- * Flush the receive FIFO. There is no specific option for this, we
+ * Flush the receive FIFO. There is no specific option for this, we
* blindly read bytes and discard them. Reading when there is no data
* is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
+ *
* All locking is handled for the caller. On return data may still be
* present if it arrived during the flush.
*/
-
+
static void z8530_flush_fifo(struct z8530_channel *c)
{
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
- if(c->dev->type==Z85230)
- {
+ if (c->dev->type == Z85230) {
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
}
-}
+}
/**
* z8530_rtsdtr - Control the outgoing DTS/RTS line
@@ -309,7 +289,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* z8530_rx - Handle a PIO receive event
* @c: Z8530 channel to process
*
- * Receive handler for receiving in PIO mode. This is much like the
+ * Receive handler for receiving in PIO mode. This is much like the
* async one but not quite the same or as complex
*
* Note: Its intended that this handler can easily be separated from
@@ -322,77 +302,63 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* other code - this is true in the RT case too.
*
* We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
+ * do it yourself but consider medical assistance first. This non DMA
+ * synchronous mode is portable code. The DMA mode assumes PCI like
* ISA DMA
*
* Called with the device lock held
*/
-
+
static void z8530_rx(struct z8530_channel *c)
{
- u8 ch,stat;
+ u8 ch, stat;
- while(1)
- {
+ while (1) {
/* FIFO empty ? */
- if(!(read_zsreg(c, R0)&1))
+ if (!(read_zsreg(c, R0) & 1))
break;
- ch=read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- /*
- * Overrun ?
+ ch = read_zsdata(c);
+ stat = read_zsreg(c, R1);
+
+ /* Overrun ?
*/
- if(c->count < c->max)
- {
- *c->dptr++=ch;
+ if (c->count < c->max) {
+ *c->dptr++ = ch;
c->count++;
}
- if(stat&END_FR)
- {
-
- /*
- * Error ?
+ if (stat & END_FR) {
+ /* Error ?
*/
- if(stat&(Rx_OVR|CRC_ERR))
- {
+ if (stat & (Rx_OVR | CRC_ERR)) {
/* Rewind the buffer and return */
- if(c->skb)
- c->dptr=c->skb->data;
- c->count=0;
- if(stat&Rx_OVR)
- {
+ if (c->skb)
+ c->dptr = c->skb->data;
+ c->count = 0;
+ if (stat & Rx_OVR) {
pr_warn("%s: overrun\n", c->dev->name);
c->rx_overrun++;
}
- if(stat&CRC_ERR)
- {
+ if (stat & CRC_ERR) {
c->rx_crc_err++;
/* printk("crc error\n"); */
}
/* Shove the frame upstream */
- }
- else
- {
- /*
- * Drop the lock for RX processing, or
- * there are deadlocks
- */
+ } else {
+ /* Drop the lock for RX processing, or
+ * there are deadlocks
+ */
z8530_rx_done(c);
write_zsctrl(c, RES_Rx_CRC);
}
}
}
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
}
-
/**
* z8530_tx - Handle a PIO transmit event
* @c: Z8530 channel to process
@@ -402,35 +368,31 @@ static void z8530_rx(struct z8530_channel *c)
* in as possible, its quite possible that we won't keep up with the
* data rate otherwise.
*/
-
+
static void z8530_tx(struct z8530_channel *c)
{
- while(c->txcount) {
+ while (c->txcount) {
/* FIFO full ? */
- if(!(read_zsreg(c, R0)&4))
+ if (!(read_zsreg(c, R0) & 4))
return;
c->txcount--;
- /*
- * Shovel out the byte
+ /* Shovel out the byte
*/
write_zsreg(c, R8, *c->tx_ptr++);
write_zsctrl(c, RES_H_IUS);
/* We are about to underflow */
- if(c->txcount==0)
- {
+ if (c->txcount == 0) {
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
}
}
-
- /*
- * End of frame TX - fire another one
+ /* End of frame TX - fire another one
*/
-
+
write_zsctrl(c, RES_Tx_P);
- z8530_tx_done(c);
+ z8530_tx_done(c);
write_zsctrl(c, RES_H_IUS);
}
@@ -460,8 +422,7 @@ static void z8530_status(struct z8530_channel *chan)
z8530_tx_done(chan);
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -474,7 +435,6 @@ static void z8530_status(struct z8530_channel *chan)
if (chan->netdevice)
netif_carrier_off(chan->netdevice);
}
-
}
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -485,7 +445,6 @@ struct z8530_irqhandler z8530_sync = {
.tx = z8530_tx,
.status = z8530_status,
};
-
EXPORT_SYMBOL(z8530_sync);
/**
@@ -497,31 +456,27 @@ EXPORT_SYMBOL(z8530_sync);
* events are handled by the DMA hardware. We get a kick here only if
* a frame ended.
*/
-
+
static void z8530_dma_rx(struct z8530_channel *chan)
{
- if(chan->rxdma_on)
- {
+ if (chan->rxdma_on) {
/* Special condition check only */
u8 status;
-
+
read_zsreg(chan, R7);
read_zsreg(chan, R6);
-
- status=read_zsreg(chan, R1);
-
- if(status&END_FR)
- {
+
+ status = read_zsreg(chan, R1);
+
+ if (status & END_FR)
z8530_rx_done(chan); /* Fire up the next one */
- }
+
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_H_IUS);
- }
- else
- {
+ } else {
/* DMA is off right now, drain the slow way */
z8530_rx(chan);
- }
+ }
}
/**
@@ -531,11 +486,9 @@ static void z8530_dma_rx(struct z8530_channel *chan)
* We have received an interrupt while doing DMA transmissions. It
* shouldn't happen. Scream loudly if it does.
*/
-
static void z8530_dma_tx(struct z8530_channel *chan)
{
- if(!chan->dma_tx)
- {
+ if (!chan->dma_tx) {
pr_warn("Hey who turned the DMA off?\n");
z8530_tx(chan);
return;
@@ -548,40 +501,35 @@ static void z8530_dma_tx(struct z8530_channel *chan)
/**
* z8530_dma_status - Handle a DMA status exception
* @chan: Z8530 channel to process
- *
+ *
* A status event occurred on the Z8530. We receive these for two reasons
* when in DMA mode. Firstly if we finished a packet transfer we get one
* and kick the next packet out. Secondly we may see a DCD change.
*
*/
-
static void z8530_dma_status(struct z8530_channel *chan)
{
u8 status, altered;
- status=read_zsreg(chan, R0);
- altered=chan->status^status;
-
- chan->status=status;
+ status = read_zsreg(chan, R0);
+ altered = chan->status ^ status;
+ chan->status = status;
- if(chan->dma_tx)
- {
- if(status&TxEOM)
- {
+ if (chan->dma_tx) {
+ if (status & TxEOM) {
unsigned long flags;
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on=0;
+ clear_dma_ff(chan->txdma);
+ chan->txdma_on = 0;
release_dma_lock(flags);
z8530_tx_done(chan);
}
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -621,21 +569,18 @@ static struct z8530_irqhandler z8530_txdma_sync = {
* (eg the MacII) we must clear the interrupt cause or die.
*/
-
static void z8530_rx_clear(struct z8530_channel *c)
{
- /*
- * Data and status bytes
+ /* Data and status bytes
*/
u8 stat;
read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- if(stat&END_FR)
+ stat = read_zsreg(c, R1);
+
+ if (stat & END_FR)
write_zsctrl(c, RES_Rx_CRC);
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
@@ -667,8 +612,9 @@ static void z8530_tx_clear(struct z8530_channel *c)
static void z8530_status_clear(struct z8530_channel *chan)
{
- u8 status=read_zsreg(chan, R0);
- if(status&TxEOM)
+ u8 status = read_zsreg(chan, R0);
+
+ if (status & TxEOM)
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -679,13 +625,11 @@ struct z8530_irqhandler z8530_nop = {
.tx = z8530_tx_clear,
.status = z8530_status_clear,
};
-
-
EXPORT_SYMBOL(z8530_nop);
/**
* z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
+ * @irq: Interrupt number
* @dev_id: The Z8530 device that is interrupting.
*
* A Z85[2]30 device has stuck its hand in the air for attention.
@@ -701,78 +645,73 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
- struct z8530_dev *dev=dev_id;
+ struct z8530_dev *dev = dev_id;
u8 intr;
static volatile int locker=0;
- int work=0;
+ int work = 0;
struct z8530_irqhandler *irqs;
-
- if(locker)
- {
+
+ if (locker) {
pr_err("IRQ re-enter\n");
return IRQ_NONE;
}
- locker=1;
+ locker = 1;
spin_lock(&dev->lock);
- while(++work<5000)
- {
-
+ while (++work < 5000) {
intr = read_zsreg(&dev->chanA, R3);
- if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
+ if (!(intr &
+ (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
break;
-
- /* This holds the IRQ status. On the 8530 you must read it from chan
- A even though it applies to the whole chip */
-
+
+ /* This holds the IRQ status. On the 8530 you must read it
+ * from chan A even though it applies to the whole chip
+ */
+
/* Now walk the chip and see what it is wanting - it may be
- an IRQ for someone else remember */
-
- irqs=dev->chanA.irqs;
+ * an IRQ for someone else remember
+ */
+
+ irqs = dev->chanA.irqs;
- if(intr & (CHARxIP|CHATxIP|CHAEXT))
- {
- if(intr&CHARxIP)
+ if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
+ if (intr & CHARxIP)
irqs->rx(&dev->chanA);
- if(intr&CHATxIP)
+ if (intr & CHATxIP)
irqs->tx(&dev->chanA);
- if(intr&CHAEXT)
+ if (intr & CHAEXT)
irqs->status(&dev->chanA);
}
- irqs=dev->chanB.irqs;
+ irqs = dev->chanB.irqs;
- if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
- {
- if(intr&CHBRxIP)
+ if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
+ if (intr & CHBRxIP)
irqs->rx(&dev->chanB);
- if(intr&CHBTxIP)
+ if (intr & CHBTxIP)
irqs->tx(&dev->chanB);
- if(intr&CHBEXT)
+ if (intr & CHBEXT)
irqs->status(&dev->chanB);
}
}
spin_unlock(&dev->lock);
- if(work==5000)
+ if (work == 5000)
pr_err("%s: interrupt jammed - abort(0x%X)!\n",
dev->name, intr);
/* Ok all done */
- locker=0;
+ locker = 0;
return IRQ_HANDLED;
}
-
EXPORT_SYMBOL(z8530_interrupt);
-static const u8 reg_init[16]=
-{
- 0,0,0,0,
- 0,0,0,0,
- 0,0,0,0,
- 0x55,0,0,0
+static const u8 reg_init[16] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0x55, 0, 0, 0
};
-
/**
* z8530_sync_open - Open a Z8530 channel for PIO
* @dev: The network interface we are using
@@ -781,7 +720,6 @@ static const u8 reg_init[16]=
* Switch a Z8530 into synchronous mode without DMA assist. We
* raise the RTS/DTR and commence network operation.
*/
-
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long flags;
@@ -789,7 +727,7 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
spin_lock_irqsave(c->lock, flags);
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
@@ -798,17 +736,15 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
/* This loads the double buffer up */
z8530_rx_done(c); /* Load the frame ring */
z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c,1);
+ z8530_rtsdtr(c, 1);
c->dma_tx = 0;
- c->regs[R1]|=TxINT_ENAB;
+ c->regs[R1] |= TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_open);
/**
@@ -819,25 +755,23 @@ EXPORT_SYMBOL(z8530_sync_open);
* Close down a Z8530 interface and switch its interrupt handlers
* to discard future events.
*/
-
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
spin_lock_irqsave(c->lock, flags);
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- chk=read_zsreg(c,R0);
+
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_close);
/**
@@ -849,91 +783,83 @@ EXPORT_SYMBOL(z8530_sync_close);
* ISA DMA channels must be available for this to work. We assume ISA
* DMA driven I/O and PC limits on access.
*/
-
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long cflags, dflags;
-
+
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
- /*
- * Load the DMA interfaces up
+
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->rx_buf[0]==NULL)
+
+ c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->rx_buf[0])
return -ENOBUFS;
- c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- {
+ c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
+
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
return -ENOBUFS;
}
- c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
- c->tx_dma_used=0;
+ c->tx_dma_used = 0;
c->dma_tx = 1;
- c->dma_num=0;
- c->dma_ready=1;
-
- /*
- * Enable DMA control mode
+ c->dma_num = 0;
+ c->dma_ready = 1;
+
+ /* Enable DMA control mode
*/
spin_lock_irqsave(c->lock, cflags);
-
- /*
- * TX DMA via DIR/REQ
+
+ /* TX DMA via DIR/REQ
+ */
+
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /* RX DMA via W/Req
*/
-
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_FN_RDYFN;
+ c->regs[R1] |= WT_RDY_RT;
+ c->regs[R1] |= INT_ERR_Rx;
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * RX DMA via W/Req
- */
-
- c->regs[R1]|= WT_FN_RDYFN;
- c->regs[R1]|= WT_RDY_RT;
- c->regs[R1]|= INT_ERR_Rx;
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_RDY_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]|= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * DMA interrupts
+
+ /* DMA interrupts
+ */
+
+ /* Set up the DMA configuration
*/
-
- /*
- * Set up the DMA configuration
- */
-
- dflags=claim_dma_lock();
-
+
+ dflags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
set_dma_count(c->rxdma, c->mtu);
enable_dma(c->rxdma);
@@ -942,26 +868,24 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
clear_dma_ff(c->txdma);
set_dma_mode(c->txdma, DMA_MODE_WRITE);
disable_dma(c->txdma);
-
+
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 1;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_open);
/**
@@ -972,66 +896,60 @@ EXPORT_SYMBOL(z8530_sync_dma_open);
* Shut down a DMA mode synchronous interface. Halt the DMA, and
* free the buffers.
*/
-
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
-
+
c->rxdma_on = 0;
-
+
disable_dma(c->txdma);
clear_dma_ff(c->txdma);
release_dma_lock(flags);
-
+
c->txdma_on = 0;
c->tx_dma_used = 0;
spin_lock_irqsave(c->lock, flags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->rx_buf[0])
- {
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->rx_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
}
- if(c->tx_dma_buf[0])
- {
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_close);
/**
@@ -1050,65 +968,58 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
printk("Opening sync interface for TX-DMA\n");
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- return -ENOBUFS;
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0])
+ return -ENOBUFS;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
spin_lock_irqsave(c->lock, cflags);
- /*
- * Load the PIO receive ring
+ /* Load the PIO receive ring
*/
z8530_rx_done(c);
z8530_rx_done(c);
- /*
- * Load the DMA interfaces up
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- c->tx_dma_used=0;
- c->dma_num=0;
- c->dma_ready=1;
+
+ c->tx_dma_used = 0;
+ c->dma_num = 0;
+ c->dma_ready = 1;
c->dma_tx = 1;
- /*
- * Enable DMA control mode
+ /* Enable DMA control mode
*/
- /*
- * TX DMA via DIR/REQ
+ /* TX DMA via DIR/REQ
*/
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * Set up the DMA configuration
- */
-
+
+ /* Set up the DMA configuration
+ */
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1117,23 +1028,21 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
disable_dma(c->txdma);
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 0;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_txdma_open);
/**
@@ -1141,7 +1050,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_open);
* @dev: Network device to detach
* @c: Z8530 channel to move into discard mode
*
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
+ * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
* and free the buffers.
*/
@@ -1150,17 +1059,15 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
unsigned long dflags, cflags;
u8 chk;
-
spin_lock_irqsave(c->lock, cflags);
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1170,41 +1077,34 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
release_dma_lock(dflags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->tx_dma_buf[0])
- {
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, cflags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/*
- * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
+/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
* it exists...
*/
-
-static const char *z8530_type_name[]={
+static const char * const z8530_type_name[] = {
"Z8530",
"Z85C30",
"Z85230"
@@ -1224,78 +1124,71 @@ static const char *z8530_type_name[]={
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
+ dev->name,
z8530_type_name[dev->type],
mapping,
Z8530_PORT_OF(io),
dev->irq);
}
-
EXPORT_SYMBOL(z8530_describe);
-/*
- * Locked operation part of the z8530 init code
+/* Locked operation part of the z8530 init code
*/
-
static inline int do_z8530_init(struct z8530_dev *dev)
{
/* NOP the interrupt handlers first - we might get a
- floating IRQ transition when we reset the chip */
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
- dev->chanA.dcdcheck=DCD;
- dev->chanB.dcdcheck=DCD;
+ * floating IRQ transition when we reset the chip
+ */
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
+ dev->chanA.dcdcheck = DCD;
+ dev->chanB.dcdcheck = DCD;
/* Reset the chip */
write_zsreg(&dev->chanA, R9, 0xC0);
udelay(200);
/* Now check its valid */
write_zsreg(&dev->chanA, R12, 0xAA);
- if(read_zsreg(&dev->chanA, R12)!=0xAA)
+ if (read_zsreg(&dev->chanA, R12) != 0xAA)
return -ENODEV;
write_zsreg(&dev->chanA, R12, 0x55);
- if(read_zsreg(&dev->chanA, R12)!=0x55)
+ if (read_zsreg(&dev->chanA, R12) != 0x55)
return -ENODEV;
-
- dev->type=Z8530;
-
- /*
- * See the application note.
+
+ dev->type = Z8530;
+
+ /* See the application note.
*/
-
+
write_zsreg(&dev->chanA, R15, 0x01);
-
- /*
- * If we can set the low bit of R15 then
+
+ /* If we can set the low bit of R15 then
* the chip is enhanced.
*/
-
- if(read_zsreg(&dev->chanA, R15)==0x01)
- {
+
+ if (read_zsreg(&dev->chanA, R15) == 0x01) {
/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
/* Put a char in the fifo */
write_zsreg(&dev->chanA, R8, 0);
- if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
+ if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
dev->type = Z85230; /* Has a FIFO */
else
dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
}
-
- /*
- * The code assumes R7' and friends are
+
+ /* The code assumes R7' and friends are
* off. Use write_zsext() for these and keep
* this bit clear.
*/
-
+
write_zsreg(&dev->chanA, R15, 0);
-
- /*
- * At this point it looks like the chip is behaving
+
+ /* At this point it looks like the chip is behaving
*/
-
+
memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init ,16);
-
+ memcpy(dev->chanB.regs, reg_init, 16);
+
return 0;
}
@@ -1332,36 +1225,32 @@ int z8530_init(struct z8530_dev *dev)
return ret;
}
-
-
EXPORT_SYMBOL(z8530_init);
/**
* z8530_shutdown - Shutdown a Z8530 device
* @dev: The Z8530 chip to shutdown
*
- * We set the interrupt handlers to silence any interrupts. We then
+ * We set the interrupt handlers to silence any interrupts. We then
* reset the chip and wait 100uS to be sure the reset completed. Just
* in case the caller then tries to do stuff.
*
* This is called without the lock held
*/
-
int z8530_shutdown(struct z8530_dev *dev)
{
unsigned long flags;
/* Reset the chip */
spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
write_zsreg(&dev->chanA, R9, 0xC0);
/* We must lock the udelay, the chip is offlimits here */
udelay(100);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_shutdown);
/**
@@ -1370,7 +1259,7 @@ EXPORT_SYMBOL(z8530_shutdown);
* @rtable: table of register, value pairs
* FIXME: ioctl to allow user uploaded tables
*
- * Load a Z8530 channel up from the system data. We use +16 to
+ * Load a Z8530 channel up from the system data. We use +16 to
* indicate the "prime" registers. The value 255 terminates the
* table.
*/
@@ -1381,41 +1270,39 @@ int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
spin_lock_irqsave(c->lock, flags);
- while(*rtable!=255)
- {
- int reg=*rtable++;
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]|1);
- write_zsreg(c, reg&0x0F, *rtable);
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]&~1);
- c->regs[reg]=*rtable++;
+ while (*rtable != 255) {
+ int reg = *rtable++;
+
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] | 1);
+ write_zsreg(c, reg & 0x0F, *rtable);
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] & ~1);
+ c->regs[reg] = *rtable++;
}
- c->rx_function=z8530_null_rx;
- c->skb=NULL;
- c->tx_skb=NULL;
- c->tx_next_skb=NULL;
- c->mtu=1500;
- c->max=0;
- c->count=0;
- c->status=read_zsreg(c, R0);
- c->sync=1;
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ c->rx_function = z8530_null_rx;
+ c->skb = NULL;
+ c->tx_skb = NULL;
+ c->tx_next_skb = NULL;
+ c->mtu = 1500;
+ c->max = 0;
+ c->count = 0;
+ c->status = read_zsreg(c, R0);
+ c->sync = 1;
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_channel_load);
-
/**
* z8530_tx_begin - Begin packet transmission
* @c: The Z8530 channel to kick
*
* This is the speed sensitive side of transmission. If we are called
* and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
+ * nothing is queued we idle the sync.
*
* Note: We are handling this code path in the interrupt path, keep it
* fast or bad things will happen.
@@ -1426,85 +1313,68 @@ EXPORT_SYMBOL(z8530_channel_load);
static void z8530_tx_begin(struct z8530_channel *c)
{
unsigned long flags;
- if(c->tx_skb)
+
+ if (c->tx_skb)
return;
-
- c->tx_skb=c->tx_next_skb;
- c->tx_next_skb=NULL;
- c->tx_ptr=c->tx_next_ptr;
-
- if(c->tx_skb==NULL)
- {
+
+ c->tx_skb = c->tx_next_skb;
+ c->tx_next_skb = NULL;
+ c->tx_ptr = c->tx_next_ptr;
+
+ if (!c->tx_skb) {
/* Idle on */
- if(c->dma_tx)
- {
- flags=claim_dma_lock();
+ if (c->dma_tx) {
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * Check if we crapped out.
+ /* Check if we crapped out.
*/
- if (get_dma_residue(c->txdma))
- {
+ if (get_dma_residue(c->txdma)) {
c->netdevice->stats.tx_dropped++;
c->netdevice->stats.tx_fifo_errors++;
}
release_dma_lock(flags);
}
- c->txcount=0;
- }
- else
- {
- c->txcount=c->tx_skb->len;
-
-
- if(c->dma_tx)
- {
- /*
- * FIXME. DMA is broken for the original 8530,
+ c->txcount = 0;
+ } else {
+ c->txcount = c->tx_skb->len;
+
+ if (c->dma_tx) {
+ /* FIXME. DMA is broken for the original 8530,
* on the older parts we need to set a flag and
* wait for a further TX interrupt to fire this
- * stage off
+ * stage off
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * These two are needed by the 8530/85C30
+ /* These two are needed by the 8530/85C30
* and must be issued when idling.
*/
-
- if(c->dev->type!=Z85230)
- {
+ if (c->dev->type != Z85230) {
write_zsctrl(c, RES_Tx_CRC);
write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ }
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
clear_dma_ff(c->txdma);
set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
set_dma_count(c->txdma, c->txcount);
enable_dma(c->txdma);
release_dma_lock(flags);
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5]|TxENAB);
- }
- else
- {
-
+ write_zsreg(c, R5, c->regs[R5] | TxENAB);
+ } else {
/* ABUNDER off */
write_zsreg(c, R10, c->regs[10]);
write_zsctrl(c, RES_Tx_CRC);
-
- while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
- {
+
+ while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
write_zsreg(c, R8, *c->tx_ptr++);
c->txcount--;
}
-
}
}
- /*
- * Since we emptied tx_skb we can ask for more
+ /* Since we emptied tx_skb we can ask for more
*/
netif_wake_queue(c->netdevice);
}
@@ -1525,7 +1395,7 @@ static void z8530_tx_done(struct z8530_channel *c)
struct sk_buff *skb;
/* Actually this can happen.*/
- if (c->tx_skb == NULL)
+ if (!c->tx_skb)
return;
skb = c->tx_skb;
@@ -1544,12 +1414,10 @@ static void z8530_tx_done(struct z8530_channel *c)
* We point the receive handler at this function when idle. Instead
* of processing the frames we get to throw them away.
*/
-
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
-
EXPORT_SYMBOL(z8530_null_rx);
/**
@@ -1564,67 +1432,58 @@ EXPORT_SYMBOL(z8530_null_rx);
*
* Called with the lock held
*/
-
static void z8530_rx_done(struct z8530_channel *c)
{
struct sk_buff *skb;
int ct;
-
- /*
- * Is our receive engine in DMA mode
+
+ /* Is our receive engine in DMA mode
*/
-
- if(c->rxdma_on)
- {
- /*
- * Save the ready state and the buffer currently
+ if (c->rxdma_on) {
+ /* Save the ready state and the buffer currently
* being used as the DMA target
*/
-
- int ready=c->dma_ready;
- unsigned char *rxb=c->rx_buf[c->dma_num];
+ int ready = c->dma_ready;
+ unsigned char *rxb = c->rx_buf[c->dma_num];
unsigned long flags;
-
- /*
- * Complete this DMA. Necessary to find the length
- */
-
- flags=claim_dma_lock();
-
+
+ /* Complete this DMA. Necessary to find the length
+ */
+ flags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- c->rxdma_on=0;
- ct=c->mtu-get_dma_residue(c->rxdma);
- if(ct<0)
- ct=2; /* Shit happens.. */
- c->dma_ready=0;
-
- /*
- * Normal case: the other slot is free, start the next DMA
+ c->rxdma_on = 0;
+ ct = c->mtu - get_dma_residue(c->rxdma);
+ if (ct < 0)
+ ct = 2; /* Shit happens.. */
+ c->dma_ready = 0;
+
+ /* Normal case: the other slot is free, start the next DMA
* into it immediately.
*/
-
- if(ready)
- {
- c->dma_num^=1;
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+
+ if (ready) {
+ c->dma_num ^= 1;
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
set_dma_count(c->rxdma, c->mtu);
c->rxdma_on = 1;
enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- from passing */
+ /* Stop any frames that we missed the head of
+ * from passing
+ */
write_zsreg(c, R0, RES_Rx_CRC);
- }
- else
+ } else {
/* Can't occur as we dont reenable the DMA irq until
- after the flip is done */
+ * after the flip is done
+ */
netdev_warn(c->netdevice, "DMA flip overrun!\n");
+ }
release_dma_lock(flags);
- /*
- * Shove the old buffer into an sk_buff. We can't DMA
+ /* Shove the old buffer into an sk_buff. We can't DMA
* directly into one on a PC - it might be above the 16Mb
* boundary. Optimisation - we could check to see if we
* can avoid the copy. Optimisation 2 - make the memcpy
@@ -1632,7 +1491,7 @@ static void z8530_rx_done(struct z8530_channel *c)
*/
skb = dev_alloc_skb(ct);
- if (skb == NULL) {
+ if (!skb) {
c->netdevice->stats.rx_dropped++;
netdev_warn(c->netdevice, "Memory squeeze\n");
} else {
@@ -1646,8 +1505,7 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_LOCK;
skb = c->skb;
- /*
- * The game we play for non DMA is similar. We want to
+ /* The game we play for non DMA is similar. We want to
* get the controller set up for the next packet as fast
* as possible. We potentially only have one byte + the
* fifo length for this. Thus we want to flip to the new
@@ -1658,7 +1516,7 @@ static void z8530_rx_done(struct z8530_channel *c)
* sync IRQ for the RT_LOCK area.
*
*/
- ct=c->count;
+ ct = c->count;
c->skb = c->skb2;
c->count = 0;
@@ -1673,15 +1531,13 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_UNLOCK;
c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2 == NULL)
- netdev_warn(c->netdevice, "memory squeeze\n");
- else
+ if (c->skb2)
skb_put(c->skb2, c->mtu);
+
c->netdevice->stats.rx_packets++;
c->netdevice->stats.rx_bytes += ct;
}
- /*
- * If we received a frame we must now process it.
+ /* If we received a frame we must now process it.
*/
if (skb) {
skb_trim(skb, ct);
@@ -1702,9 +1558,10 @@ static void z8530_rx_done(struct z8530_channel *c)
static inline int spans_boundary(struct sk_buff *skb)
{
- unsigned long a=(unsigned long)skb->data;
- a^=(a+skb->len);
- if(a&0x00010000) /* If the 64K bit is different.. */
+ unsigned long a = (unsigned long)skb->data;
+
+ a ^= (a + skb->len);
+ if (a & 0x00010000) /* If the 64K bit is different.. */
return 1;
return 0;
}
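
The same 64K-boundary test as spans_boundary() above, as a standalone sketch with example addresses (hypothetical values, chosen only to show both outcomes): XOR the first and one-past-last address of the buffer and look at bit 16.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 1 when the buffer straddles a 64K boundary and therefore cannot
 * be handed to the ISA DMA controller directly. */
static int crosses_64k(uintptr_t start, size_t len)
{
	uintptr_t a = start;

	a ^= (a + len);
	return (a & 0x00010000) ? 1 : 0;
}

int main(void)
{
	/* 0x1FF00 + 0x200 ends at 0x20100, crossing the 0x20000 boundary. */
	printf("%d\n", crosses_64k(0x1FF00, 0x200));	/* 1 */
	printf("%d\n", crosses_64k(0x10000, 0x200));	/* 0 */
	return 0;
}
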
@@ -1715,60 +1572,54 @@ static inline int spans_boundary(struct sk_buff *skb)
* @skb: The packet to kick down the channel
*
* Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
+ * hard to hit interrupt latencies for the Z85230 per packet
* even in DMA mode we do the flip to DMA buffer if needed here
* not in the IRQ.
*
- * Called from the network code. The lock is not held at this
+ * Called from the network code. The lock is not held at this
* point.
*/
-
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
unsigned long flags;
-
+
netif_stop_queue(c->netdevice);
- if(c->tx_next_skb)
+ if (c->tx_next_skb)
return NETDEV_TX_BUSY;
-
/* PC SPECIFIC - DMA limits */
-
- /*
- * If we will DMA the transmit and its gone over the ISA bus
+ /* If we will DMA the transmit and its gone over the ISA bus
* limit, then copy to the flip buffer
*/
-
- if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
- {
- /*
- * Send the flip buffer, and flip the flippy bit.
+
+ if (c->dma_tx &&
+ ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
+ 16 * 1024 * 1024 || spans_boundary(skb))) {
+ /* Send the flip buffer, and flip the flippy bit.
* We don't care which is used when just so long as
* we never use the same buffer twice in a row. Since
* only one buffer can be going out at a time the other
* has to be safe.
*/
- c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used^=1; /* Flip temp buffer */
+ c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
+ c->tx_dma_used ^= 1; /* Flip temp buffer */
skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
+ } else {
+ c->tx_next_ptr = skb->data;
}
- else
- c->tx_next_ptr=skb->data;
RT_LOCK;
- c->tx_next_skb=skb;
+ c->tx_next_skb = skb;
RT_UNLOCK;
-
+
spin_lock_irqsave(c->lock, flags);
z8530_tx_begin(c);
spin_unlock_irqrestore(c->lock, flags);
-
+
return NETDEV_TX_OK;
}
-
EXPORT_SYMBOL(z8530_queue_xmit);
-/*
- * Module support
+/* Module support
*/
static const char banner[] __initconst =
KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
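
The receive path above relies on two half-page DMA buffers and flips dma_num between them so the controller can refill one while the other is drained. A minimal sketch of that flip pattern, stripped of the DMA programming and locking details (buffer size and frame contents are made up for the example):

#include <stdio.h>
#include <string.h>

#define BUF_SZ 64

/* Two receive buffers; 'active' is the one the (imaginary) DMA engine is
 * filling, the other is free to be drained.  This mirrors the
 * rx_buf[0]/rx_buf[1] + dma_num flip in z8530_rx_done() in spirit only. */
static char rx_buf[2][BUF_SZ];
static int active;

/* Called when a frame completes: hand the finished buffer to the consumer
 * and immediately flip so the next frame lands in the other buffer. */
static const char *rx_done(void)
{
	const char *done = rx_buf[active];

	active ^= 1;	/* flip to the other buffer */
	/* ...real code would re-arm DMA on rx_buf[active] here... */
	return done;
}

int main(void)
{
	strcpy(rx_buf[0], "frame A");
	printf("%s\n", rx_done());	/* drains buffer 0, DMA now on 1 */
	strcpy(rx_buf[1], "frame B");
	printf("%s\n", rx_done());	/* drains buffer 1, DMA back on 0 */
	return 0;
}
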
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d7869a984881..2e704c7a105a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1531,14 +1531,13 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
&mask->basic.n_proto,
TCA_FLOWER_UNSPEC,
sizeof(key->basic.n_proto));
+ mask->basic.n_proto = cpu_to_be16(0);
} else {
key->basic.n_proto = ethertype;
- mask->basic.n_proto = cpu_to_be16(~0);
}
}
} else {
key->basic.n_proto = ethertype;
- mask->basic.n_proto = cpu_to_be16(~0);
}
}
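
The behavioural point of this hunk is the n_proto mask; a short sketch of masked matching (plain integers, not flower's key/mask structures) showing why a zero mask turns the field into a don't-care while an all-ones mask forces an exact ethertype match:

#include <stdint.h>
#include <stdio.h>

/* Masked comparison as flower applies it: a field matches when
 * (packet_value & mask) == (key_value & mask). */
static int masked_match(uint16_t pkt, uint16_t key, uint16_t mask)
{
	return (pkt & mask) == (key & mask);
}

int main(void)
{
	uint16_t ipv4 = 0x0800, ipv6 = 0x86DD;

	/* mask 0x0000: any n_proto matches (the branch touched above) */
	printf("%d\n", masked_match(ipv6, ipv4, 0x0000));	/* 1 */
	/* mask 0xFFFF: only the exact ethertype matches */
	printf("%d\n", masked_match(ipv6, ipv4, 0xFFFF));	/* 0 */
	return 0;
}
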