Diffstat (limited to 'drivers/net/wireless/brcm80211/brcmsmac/dma.c')
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/dma.c | 345
1 file changed, 227 insertions(+), 118 deletions(-)
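The patch replaces the DMA layer's private msg_level debug macros with the driver-wide brcms_dbg_dma()/trace infrastructure and moves per-ring AMPDU aggregation state into struct dma_info. For orientation, the externally visible prototypes end up as sketched below; this is an assumed reconstruction of the matching dma.h declarations (dma.h itself is not part of this file's diff), based on the definitions further down:

struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
			   uint txregbase, uint rxregbase,
			   uint ntxd, uint nrxd, uint rxbufsize,
			   int rxextheadroom, uint nrxpost, uint rxoffset);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
	       struct sk_buff *p);
void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);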
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 5e53305bd9a9..1860c572b3c4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -14,17 +14,22 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
#include <brcmu_utils.h>
#include <aiutils.h>
#include "types.h"
+#include "main.h"
#include "dma.h"
#include "soc.h"
+#include "scb.h"
+#include "ampdu.h"
+#include "debug.h"
+#include "brcms_trace_events.h"
/*
* dma register field offset calculation
@@ -176,28 +181,6 @@
#define BCMEXTRAHDROOM 172
-/* debug/trace */
-#ifdef DEBUG
-#define DMA_ERROR(fmt, ...) \
-do { \
- if (*di->msg_level & 1) \
- pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-#define DMA_TRACE(fmt, ...) \
-do { \
- if (*di->msg_level & 2) \
- pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-#else
-#define DMA_ERROR(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-#define DMA_TRACE(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-#endif /* DEBUG */
-
-#define DMA_NONE(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
-
#define MAXNAMEL 8 /* 8 char names */
/* macros to convert between byte offsets and indexes */
@@ -224,12 +207,14 @@ struct dma64desc {
/* dma engine software state */
struct dma_info {
struct dma_pub dma; /* exported structure */
- uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
struct bcma_device *core;
struct device *dmadev;
+ /* session information for AMPDU */
+ struct brcms_ampdu_session ampdu_session;
+
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
@@ -298,12 +283,6 @@ struct dma_info {
bool aligndesc_4k;
};
-/*
- * default dma message level (if input msg_level
- * pointer is null in dma_attach())
- */
-static uint dma_msg_level;
-
/* Check for odd number of 1's */
static u32 parity32(__le32 data)
{
@@ -353,7 +332,7 @@ static uint prevtxd(struct dma_info *di, uint i)
static uint nextrxd(struct dma_info *di, uint i)
{
- return txd(di, i + 1);
+ return rxd(di, i + 1);
}
static uint ntxdactive(struct dma_info *di, uint h, uint t)
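The one-word fix above makes nextrxd() wrap by the rx ring size rather than the tx ring size. For context, the neighbouring index helpers (not shown in this hunk) look roughly like this:

static uint txd(struct dma_info *di, uint x)
{
	return x & (di->ntxd - 1);	/* wrap at tx ring size */
}

static uint rxd(struct dma_info *di, uint x)
{
	return x & (di->nrxd - 1);	/* wrap at rx ring size */
}

static uint nextrxd(struct dma_info *di, uint i)
{
	/* previously returned txd(di, i + 1), wrapping at the wrong
	 * boundary whenever ntxd != nrxd
	 */
	return rxd(di, i + 1);
}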
@@ -370,10 +349,8 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
uint dmactrlflags;
- if (di == NULL) {
- DMA_ERROR("NULL dma handle\n");
+ if (di == NULL)
return 0;
- }
dmactrlflags = di->dma.dmactrlflags;
dmactrlflags &= ~mask;
@@ -423,13 +400,15 @@ static bool _dma_isaddrext(struct dma_info *di)
/* not all tx or rx channels are available */
if (di->d64txregbase != 0) {
if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
- DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA64 tx doesn't have AE set\n",
+ di->name);
return true;
} else if (di->d64rxregbase != 0) {
if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
- DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA64 rx doesn't have AE set\n",
+ di->name);
return true;
}
@@ -530,8 +509,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
- DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -544,8 +524,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
- DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
- di->name);
+ brcms_dbg_dma(di->core,
+ "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -564,12 +545,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
return dma64_alloc(di, direction);
}
-struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- struct bcma_device *core,
+struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ uint nrxpost, uint rxoffset)
{
+ struct si_pub *sih = wlc->hw->sih;
+ struct bcma_device *core = wlc->hw->d11core;
struct dma_info *di;
u8 rev = core->id.rev;
uint size;
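With this prototype change, call sites drop the sih, core and msg_level arguments because dma_attach() now derives them from wlc. A hedged sketch of how an invocation changes (the caller-side names wlc_hw->di[0] and brcm_msg_level are assumptions, not shown in this diff):

/* before: caller supplied sih, the d11 core and a message-level pointer */
wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
			   txregbase, rxregbase, ntxd, nrxd, rxbufsize,
			   rxextheadroom, nrxpost, rxoffset,
			   &brcm_msg_level);

/* after: only the wlc context is passed; sih and d11core come from wlc->hw */
wlc_hw->di[0] = dma_attach(name, wlc,
			   txregbase, rxregbase, ntxd, nrxd, rxbufsize,
			   rxextheadroom, nrxpost, rxoffset);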
@@ -580,9 +562,6 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if (di == NULL)
return NULL;
- di->msg_level = msg_level ? msg_level : &dma_msg_level;
-
-
di->dma64 =
((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
@@ -598,11 +577,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "txregbase %u rxregbase %u\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
+ brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our caller's name */
strncpy(di->name, name, MAXNAMEL);
@@ -664,8 +643,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
- DMA_NONE("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align);
+ brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@@ -703,21 +682,27 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
- di->name, (u32)di->txdpa);
+ brcms_dbg_dma(di->core,
+ "%s: txdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
- di->name, (u32)di->rxdpa);
+ brcms_dbg_dma(di->core,
+ "%s: rxdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->rxdpa);
goto fail;
}
}
- DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
- di->ddoffsetlow, di->ddoffsethigh,
- di->dataoffsetlow, di->dataoffsethigh,
- di->addrext);
+ /* Initialize AMPDU session */
+ brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);
+
+ brcms_dbg_dma(di->core,
+ "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+ di->ddoffsetlow, di->ddoffsethigh,
+ di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext);
return (struct dma_pub *) di;
@@ -763,7 +748,7 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
@@ -839,7 +824,7 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
control = D64_RC_RE | (bcma_read32(di->core,
DMA64RXREGOFFS(di, control)) &
@@ -859,7 +844,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->nrxd == 0)
return;
@@ -954,7 +939,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
return 0;
len = le16_to_cpu(*(__le16 *) (p->data));
- DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+ brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
dma_spin_for_len(len, p);
/* set actual length */
@@ -981,14 +966,15 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
DMA64RXREGOFFS(di, status0)) &
D64_RS0_CD_MASK) - di->rcvptrbase) &
D64_RS0_CD_MASK, struct dma64desc);
- DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur);
+ brcms_dbg_dma(di->core,
+ "rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur);
}
#endif /* DEBUG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR("%s: bad frame length (%d)\n",
- di->name, len);
+ brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
+ di->name, len);
skb_queue_walk_safe(&dma_frames, p, next) {
skb_unlink(p, &dma_frames);
brcmu_pkt_buf_free_skb(p);
@@ -1005,7 +991,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
static bool dma64_rxidle(struct dma_info *di)
{
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->nrxd == 0)
return true;
@@ -1016,6 +1002,17 @@ static bool dma64_rxidle(struct dma_info *di)
D64_RS0_CD_MASK));
}
+static bool dma64_txidle(struct dma_info *di)
+{
+ if (di->ntxd == 0)
+ return true;
+
+ return ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
+ D64_XS0_CD_MASK));
+}
+
/*
* post receive buffers
* return false if refill failed completely and ring is empty; this will stall
@@ -1047,7 +1044,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
- DMA_TRACE("%s: post %d\n", di->name, n);
+ brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@@ -1060,9 +1057,11 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
- DMA_ERROR("%s: out of rxbufs\n", di->name);
+ brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
+ di->name);
if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR("%s: ring is empty !\n", di->name);
+ brcms_dbg_dma(di->core, "%s: ring is empty !\n",
+ di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@@ -1107,7 +1106,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@@ -1138,7 +1137,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1170,7 +1169,7 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1182,7 +1181,7 @@ void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE("%s:\n", di->name);
+ brcms_dbg_dma(di->core, "%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1205,11 +1204,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE("%s: %s\n",
- di->name,
- range == DMA_RANGE_ALL ? "all" :
- range == DMA_RANGE_TRANSMITTED ? "transmitted" :
- "transferred");
+ brcms_dbg_dma(di->core, "%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->txin == di->txout)
return;
@@ -1264,39 +1263,25 @@ bool dma_rxreset(struct dma_pub *pub)
return status == D64_RS0_RS_DISABLED;
}
-/*
- * !! tx entry routine
- * WARNING: call must check the return value for error.
- * the error(toss frames) could be fatal and cause many subsequent hard
- * to debug problems
- */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
+static void dma_txenq(struct dma_info *di, struct sk_buff *p)
{
- struct dma_info *di = (struct dma_info *)pub;
unsigned char *data;
uint len;
u16 txout;
u32 flags = 0;
dma_addr_t pa;
- DMA_TRACE("%s:\n", di->name);
-
txout = di->txout;
+ if (WARN_ON(nexttxd(di, txout) == di->txin))
+ return;
+
/*
* obtain and initialize transmit descriptor entry.
*/
data = p->data;
len = p->len;
- /* no use to transmit a zero length packet */
- if (len == 0)
- return 0;
-
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
-
/* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
@@ -1318,23 +1303,147 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
/* bump the tx descriptor index */
di->txout = txout;
+}
- /* kick the chip */
- if (commit)
- bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
- di->xmtptrbase + I2B(txout, struct dma64desc));
+static void ampdu_finalize(struct dma_info *di)
+{
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ struct sk_buff *p;
+
+ trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
+ session->max_ampdu_len,
+ session->max_ampdu_frames,
+ session->ampdu_len,
+ skb_queue_len(&session->skb_list),
+ session->dma_len);
+
+ if (WARN_ON(skb_queue_empty(&session->skb_list)))
+ return;
+
+ brcms_c_ampdu_finalize(session);
+
+ while (!skb_queue_empty(&session->skb_list)) {
+ p = skb_dequeue(&session->skb_list);
+ dma_txenq(di, p);
+ }
+
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+ di->xmtptrbase + I2B(di->txout, struct dma64desc));
+ brcms_c_ampdu_reset_session(session, session->wlc);
+}
+
+static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
+{
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ int ret;
+
+ ret = brcms_c_ampdu_add_frame(session, p);
+ if (ret == -ENOSPC) {
+ /*
+ * AMPDU cannot accommodate this frame. Close out the in-
+ * progress AMPDU session and start a new one.
+ */
+ ampdu_finalize(di);
+ ret = brcms_c_ampdu_add_frame(session, p);
+ }
+
+ WARN_ON(ret);
+}
+
+/* Update count of available tx descriptors based on current DMA state */
+static void dma_update_txavail(struct dma_info *di)
+{
+ /*
+ * Available space is number of descriptors less the number of
+ * active descriptors and the number of queued AMPDU frames.
+ */
+ di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
+ skb_queue_len(&di->ampdu_session.skb_list) - 1;
+}
+
+/*
+ * !! tx entry routine
+ * WARNING: the caller must check the return value for errors.
+ * An error (tossed frames) could be fatal and cause many subsequent
+ * hard-to-debug problems.
+ */
+int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
+ struct sk_buff *p)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+ struct ieee80211_tx_info *tx_info;
+ bool is_ampdu;
+
+ /* no use to transmit a zero length packet */
+ if (p->len == 0)
+ return 0;
+
+ /* return nonzero if out of tx descriptors */
+ if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
+ goto outoftxd;
+
+ tx_info = IEEE80211_SKB_CB(p);
+ is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
+ if (is_ampdu)
+ prep_ampdu_frame(di, p);
+ else
+ dma_txenq(di, p);
/* tx flow control */
- di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+ dma_update_txavail(di);
+
+ /* kick the chip */
+ if (is_ampdu) {
+ /*
+ * Start sending data if we've got a full AMPDU, there's
+ * no more space in the DMA ring, or the ring isn't
+ * currently transmitting.
+ */
+ if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
+ di->dma.txavail == 0 || dma64_txidle(di))
+ ampdu_finalize(di);
+ } else {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
+ di->xmtptrbase + I2B(di->txout, struct dma64desc));
+ }
return 0;
outoftxd:
- DMA_ERROR("%s: out of txds !!!\n", di->name);
+ brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
- return -1;
+ return -ENOSPC;
+}
+
+void dma_txflush(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+
+ if (!skb_queue_empty(&session->skb_list))
+ ampdu_finalize(di);
+}
+
+int dma_txpending(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ return ntxdactive(di, di->txin, di->txout);
+}
+
+/*
+ * If we have an active AMPDU session and are not transmitting,
+ * this function will force tx to start.
+ */
+void dma_kick_tx(struct dma_pub *pub)
+{
+ struct dma_info *di = (struct dma_info *)pub;
+ struct brcms_ampdu_session *session = &di->ampdu_session;
+
+ if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
+ ampdu_finalize(di);
}
/*
@@ -1354,11 +1463,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
- DMA_TRACE("%s: %s\n",
- di->name,
- range == DMA_RANGE_ALL ? "all" :
- range == DMA_RANGE_TRANSMITTED ? "transmitted" :
- "transferred");
+ brcms_dbg_dma(di->core, "%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->ntxd == 0)
return NULL;
@@ -1412,13 +1521,13 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
di->txin = i;
/* tx flow control */
- di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
+ dma_update_txavail(di);
return txp;
bogus:
- DMA_NONE("bogus curr: start %d end %d txout %d\n",
- start, end, di->txout);
+ brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
+ start, end, di->txout);
return NULL;
}
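Taken together, the tx path now queues AMPDU frames into the per-ring session and only kicks the hardware once the AMPDU is full, the ring is exhausted, or the engine has gone idle. A rough caller-side sketch of the new entry points (the surrounding loop and the get_next_tx_frame() helper are assumptions, not part of this patch):

struct sk_buff *skb;

/* queue a burst of frames; AMPDU frames are aggregated internally */
while ((skb = get_next_tx_frame()) != NULL) {
	if (dma_txfast(wlc, dma, skb)) {
		/* -ENOSPC: ring full, dma_txfast() already freed the frame */
		break;
	}
}

/* push out any partially built AMPDU instead of leaving it queued */
dma_txflush(dma);

/* harmless no-op unless an AMPDU is queued and the engine is idle */
dma_kick_tx(dma);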