Diffstat (limited to 'drivers/staging/brcm80211/util')
-rw-r--r--  drivers/staging/brcm80211/util/aiutils.c          795
-rw-r--r--  drivers/staging/brcm80211/util/bcmotp.c           954
-rw-r--r--  drivers/staging/brcm80211/util/bcmsrom.c         2081
-rw-r--r--  drivers/staging/brcm80211/util/bcmutils.c        1760
-rw-r--r--  drivers/staging/brcm80211/util/bcmwifi.c          318
-rw-r--r--  drivers/staging/brcm80211/util/bcmwpa.c            51
-rw-r--r--  drivers/staging/brcm80211/util/hnddma.c          2689
-rw-r--r--  drivers/staging/brcm80211/util/hndpmu.c          2681
-rw-r--r--  drivers/staging/brcm80211/util/linux_osl.c        516
-rw-r--r--  drivers/staging/brcm80211/util/nicpci.c           880
-rw-r--r--  drivers/staging/brcm80211/util/nvram/nvram_ro.c   198
-rw-r--r--  drivers/staging/brcm80211/util/qmath.c            680
-rw-r--r--  drivers/staging/brcm80211/util/siutils.c         2914
-rw-r--r--  drivers/staging/brcm80211/util/siutils_priv.h      32
14 files changed, 16549 insertions, 0 deletions
diff --git a/drivers/staging/brcm80211/util/aiutils.c b/drivers/staging/brcm80211/util/aiutils.c
new file mode 100644
index 000000000000..8883af0efb28
--- /dev/null
+++ b/drivers/staging/brcm80211/util/aiutils.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pcicfg.h>
+#include <bcmdevs.h>
+
+#define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
+ (CHIPREV(sih->chiprev) == 0) && \
+ (sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
+
+/* EROM parsing */
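+
+/*
+ * Rough shape of the EROM stream as consumed below (a sketch inferred
+ * from this parsing code, not a spec reference): each component starts
+ * with a CIA word (tag ER_CI) followed by a CIB word, then that
+ * component's master port descriptors (ER_MP), slave address descriptors
+ * (ER_ADD, optionally extended with 32-bit address/size words) and
+ * wrapper descriptors; the whole table ends with an (ER_END | ER_VALID)
+ * word.
+ */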
+
+static uint32
+get_erom_ent(si_t * sih, uint32 ** eromptr, uint32 mask, uint32 match)
+{
+ uint32 ent;
+ uint inv = 0, nom = 0;
+
+ while (TRUE) {
+ ent = R_REG(si_osh(sih), *eromptr);
+ (*eromptr)++;
+
+ if (mask == 0)
+ break;
+
+ if ((ent & ER_VALID) == 0) {
+ inv++;
+ continue;
+ }
+
+ if (ent == (ER_END | ER_VALID))
+ break;
+
+ if ((ent & mask) == match)
+ break;
+
+ nom++;
+ }
+
+ SI_VMSG(("%s: Returning ent 0x%08x\n", __func__, ent));
+ if (inv + nom) {
+ SI_VMSG((" after %d invalid and %d non-matching entries\n",
+ inv, nom));
+ }
+ return ent;
+}
+
+static uint32
+get_asd(si_t * sih, uint32 ** eromptr, uint sp, uint ad, uint st,
+ uint32 * addrl, uint32 * addrh, uint32 * sizel, uint32 * sizeh)
+{
+ uint32 asd, sz, szd;
+
+ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
+ if (((asd & ER_TAG1) != ER_ADD) ||
+ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
+ ((asd & AD_ST_MASK) != st)) {
+ /* This is not what we want, "push" it back */
+ (*eromptr)--;
+ return 0;
+ }
+ *addrl = asd & AD_ADDR_MASK;
+ if (asd & AD_AG32)
+ *addrh = get_erom_ent(sih, eromptr, 0, 0);
+ else
+ *addrh = 0;
+ *sizeh = 0;
+ sz = asd & AD_SZ_MASK;
+ if (sz == AD_SZ_SZD) {
+ szd = get_erom_ent(sih, eromptr, 0, 0);
+ *sizel = szd & SD_SZ_MASK;
+ if (szd & SD_SG32)
+ *sizeh = get_erom_ent(sih, eromptr, 0, 0);
+ } else
+ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
+
+ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
+ sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
+
+ return asd;
+}
+
+static void ai_hwfixup(si_info_t * sii)
+{
+}
+
+/* parse the enumeration rom to identify all cores */
+void BCMATTACHFN(ai_scan) (si_t * sih, void *regs, uint devid) {
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc = (chipcregs_t *) regs;
+ uint32 erombase, *eromptr, *eromlim;
+
+ erombase = R_REG(sii->osh, &cc->eromptr);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ eromptr = (uint32 *) REG_MAP(erombase, SI_CORE_SIZE);
+ break;
+
+ case PCI_BUS:
+ /* Set wrappers address */
+ sii->curwrap = (void *)((uintptr) regs + SI_CORE_SIZE);
+
+ /* Now point the window at the erom */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+ eromptr = regs;
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+#endif /* BCMSDIO */
+ eromptr = (uint32 *) (uintptr) erombase;
+ break;
+
+ default:
+ SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n",
+ sih->bustype));
+ ASSERT(0);
+ return;
+ }
+ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
+
+ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
+ while (eromptr < eromlim) {
+ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
+ uint32 mpd, asd, addrl, addrh, sizel, sizeh;
+ uint32 *base;
+ uint i, j, idx;
+ bool br;
+
+ br = FALSE;
+
+ /* Grok a component */
+ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
+ if (cia == (ER_END | ER_VALID)) {
+ SI_VMSG(("Found END of erom after %d cores\n",
+ sii->numcores));
+ ai_hwfixup(sii);
+ return;
+ }
+ base = eromptr - 1;
+ cib = get_erom_ent(sih, &eromptr, 0, 0);
+
+ if ((cib & ER_TAG) != ER_CI) {
+ SI_ERROR(("CIA not followed by CIB\n"));
+ goto error;
+ }
+
+ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
+ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
+ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
+ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
+ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
+
+ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
+
+ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
+ continue;
+ if ((nmw + nsw == 0)) {
+ /* A component which is not a core */
+ if (cid == OOB_ROUTER_CORE_ID) {
+ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd != 0) {
+ sii->oob_router = addrl;
+ }
+ }
+ continue;
+ }
+
+ idx = sii->numcores;
+/* sii->eromptr[idx] = base; */
+ sii->cia[idx] = cia;
+ sii->cib[idx] = cib;
+ sii->coreid[idx] = cid;
+
+ for (i = 0; i < nmp; i++) {
+ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
+ if ((mpd & ER_TAG) != ER_MP) {
+ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
+ goto error;
+ }
+ SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
+ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
+ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
+ }
+
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
+ asd =
+ get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
+ &sizel, &sizeh);
+ if (asd == 0) {
+ /* Try again to see if it is a bridge */
+ asd =
+ get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
+ &addrh, &sizel, &sizeh);
+ if (asd != 0)
+ br = TRUE;
+ else if ((addrh != 0) || (sizeh != 0)
+ || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
+ goto error;
+ }
+ }
+ sii->coresba[idx] = addrl;
+ sii->coresba_size[idx] = sizel;
+ /* Get any more ASDs in port 0 */
+ j = 1;
+ do {
+ asd =
+ get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
+ &addrh, &sizel, &sizeh);
+ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
+ sii->coresba2[idx] = addrl;
+ sii->coresba2_size[idx] = sizel;
+ }
+ j++;
+ } while (asd != 0);
+
+ /* Go through the ASDs for other slave ports */
+ for (i = 1; i < nsp; i++) {
+ j = 0;
+ do {
+ asd =
+ get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
+ &addrl, &addrh, &sizel, &sizeh);
+ } while (asd != 0);
+ if (j == 0) {
+ SI_ERROR((" SP %d has no address descriptors\n",
+ i));
+ goto error;
+ }
+ }
+
+ /* Now get master wrappers */
+ for (i = 0; i < nmw; i++) {
+ asd =
+ get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
+ &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for MW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Master wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if (i == 0)
+ sii->wrapba[idx] = addrl;
+ }
+
+ /* And finally slave wrappers */
+ for (i = 0; i < nsw; i++) {
+ uint fwp = (nsp == 1) ? 0 : 1;
+ asd =
+ get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
+ &addrl, &addrh, &sizel, &sizeh);
+ if (asd == 0) {
+ SI_ERROR(("Missing descriptor for SW %d\n", i));
+ goto error;
+ }
+ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
+ SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
+ goto error;
+ }
+ if ((nmw == 0) && (i == 0))
+ sii->wrapba[idx] = addrl;
+ }
+
+ /* Don't record bridges */
+ if (br)
+ continue;
+
+ /* Done with core */
+ sii->numcores++;
+ }
+
+ SI_ERROR(("Reached end of erom without finding END"));
+
+ error:
+ sii->numcores = 0;
+ return;
+}
+
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
+void *ai_setcoreidx(si_t * sih, uint coreidx)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 addr = sii->coresba[coreidx];
+ uint32 wrap = sii->wrapba[coreidx];
+ void *regs;
+
+ if (coreidx >= sii->numcores)
+ return (NULL);
+
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
+ ASSERT((sii->intrsenabled_fn == NULL)
+ || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ /* map new one */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ sii->curmap = regs = sii->regs[coreidx];
+ if (!sii->wrappers[coreidx]) {
+ sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->wrappers[coreidx]));
+ }
+ sii->curwrap = sii->wrappers[coreidx];
+ break;
+
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+ regs = sii->curmap;
+ /* point bar0 2nd 4KB window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ case SDIO_BUS:
+#endif /* BCMSDIO */
+ sii->curmap = regs = (void *)((uintptr) addr);
+ sii->curwrap = (void *)((uintptr) wrap);
+ break;
+
+ default:
+ ASSERT(0);
+ regs = NULL;
+ break;
+ }
+
+ sii->curmap = regs;
+ sii->curidx = coreidx;
+
+ return regs;
+}
+
+/* Return the number of address spaces in current core */
+int ai_numaddrspaces(si_t * sih)
+{
+ return 2;
+}
+
+/* Return the address of the nth address space in the current core */
+uint32 ai_addrspace(si_t * sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba[cidx];
+ else if (asidx == 1)
+ return sii->coresba2[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
+ return 0;
+ }
+}
+
+/* Return the size of the nth address space in the current core */
+uint32 ai_addrspacesize(si_t * sih, uint asidx)
+{
+ si_info_t *sii;
+ uint cidx;
+
+ sii = SI_INFO(sih);
+ cidx = sii->curidx;
+
+ if (asidx == 0)
+ return sii->coresba_size[cidx];
+ else if (asidx == 1)
+ return sii->coresba2_size[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
+ return 0;
+ }
+}
+
+uint ai_flag(si_t * sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
+ return sii->curidx;
+ }
+ ai = sii->curwrap;
+
+ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
+}
+
+void ai_setint(si_t * sih, int siflag)
+{
+}
+
+void ai_write_wrap_reg(si_t * sih, uint32 offset, uint32 val)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 *w = (uint32 *) sii->curwrap;
+ W_REG(sii->osh, w + (offset / 4), val);
+ return;
+}
+
+uint ai_corevendor(si_t * sih)
+{
+ si_info_t *sii;
+ uint32 cia;
+
+ sii = SI_INFO(sih);
+ cia = sii->cia[sii->curidx];
+ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
+}
+
+uint ai_corerev(si_t * sih)
+{
+ si_info_t *sii;
+ uint32 cib;
+
+ sii = SI_INFO(sih);
+ cib = sii->cib[sii->curidx];
+ return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
+}
+
+bool ai_iscoreup(si_t * sih)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+
+ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
+ SICF_CLOCK_EN)
+ && ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
+uint ai_corereg(si_t * sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ uint origidx = 0;
+ uint32 *r = NULL;
+ uint w;
+ uint intr_val = 0;
+ bool fast = FALSE;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+ ASSERT((val & ~mask) == 0);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!sii->regs[coreidx]) {
+ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(sii->regs[coreidx]));
+ }
+ r = (uint32 *) ((uchar *) sii->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *) ((char *)sii->curmap +
+ PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *) ((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET +
+ regoff);
+ else
+ r = (uint32 *) ((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET :
+ PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+
+ /* save current core index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* switch core */
+ r = (uint32 *) ((uchar *) ai_setcoreidx(&sii->pub, coreidx) +
+ regoff);
+ }
+ ASSERT(r != NULL);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_REG(sii->osh, r) & ~mask) | val;
+ W_REG(sii->osh, r, w);
+ }
+
+ /* readback */
+ w = R_REG(sii->osh, r);
+
+ if (!fast) {
+ /* restore core index */
+ if (origidx != coreidx)
+ ai_setcoreidx(&sii->pub, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (w);
+}
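+
+/*
+ * Usage sketch (illustrative only; OFFSETOF() assumed to be the usual
+ * bcmutils.h struct-offset helper): a single read-modify-write of
+ * chipcommon's clkdiv register, with ai_corereg() doing any needed core
+ * switch and interrupt masking on the caller's behalf:
+ *
+ *	ai_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clkdiv),
+ *		   CLKD_OTP, 12 << CLKD_OTP_SHIFT);
+ *
+ * With mask == 0 and val == 0 the call is a plain read of the register
+ * at 'regoff' in core 'coreidx'.
+ */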
+
+void ai_core_disable(si_t * sih, uint32 bits)
+{
+ si_info_t *sii;
+ volatile uint32 dummy;
+ aidmp_t *ai;
+
+ sii = SI_INFO(sih);
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /* if core is already in reset, just return */
+ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
+ return;
+
+ W_REG(sii->osh, &ai->ioctrl, bits);
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(10);
+
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void ai_core_reset(si_t * sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ ai_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ W_REG(sii->osh, &ai->resetctrl, 0);
+ OSL_DELAY(1);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
+ dummy = R_REG(sii->osh, &ai->ioctrl);
+ OSL_DELAY(1);
+}
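+
+/*
+ * Usage sketch: select a core, reset it with no extra core-specific
+ * ioctrl bits (either argument may carry such bits when needed), then
+ * verify that it is clocked and out of reset:
+ *
+ *	ai_setcoreidx(sih, coreidx);
+ *	ai_core_reset(sih, 0, 0);
+ *	if (!ai_iscoreup(sih))
+ *		SI_ERROR(("core %d did not come up\n", coreidx));
+ */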
+
+void ai_core_cflags_wo(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+ __func__));
+ return;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+}
+
+uint32 ai_core_cflags(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
+ __func__));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
+ W_REG(sii->osh, &ai->ioctrl, w);
+ }
+
+ return R_REG(sii->osh, &ai->ioctrl);
+}
+
+uint32 ai_core_sflags(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ aidmp_t *ai;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ if (BCM47162_DMP()) {
+ SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
+ return 0;
+ }
+
+ ASSERT(GOODREGS(sii->curwrap));
+ ai = sii->curwrap;
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ if (mask || val) {
+ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
+ W_REG(sii->osh, &ai->iostatus, w);
+ }
+
+ return R_REG(sii->osh, &ai->iostatus);
+}
+
+#ifdef BCMDBG
+void ai_view(si_t * sih, bool verbose)
+{
+ si_info_t *sii;
+ osl_t *osh;
+ aidmp_t *ai;
+ uint32 config;
+
+ sii = SI_INFO(sih);
+ ai = sii->curwrap;
+ osh = sii->osh;
+ if (BCM47162_DMP()) {
+ SI_ERROR(("Cannot access mips74k DMP in 47162a0\n"));
+ return;
+ }
+
+ config = R_REG(osh, &ai->config);
+ SI_ERROR(("\nCore ID: 0x%x, config 0x%x\n", si_coreid(&sii->pub),
+ config));
+
+ if (config & AICFG_RST)
+ SI_ERROR(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n", R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus), R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid)));
+
+ if (config & AICFG_IOC)
+ SI_ERROR(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->ioctrlwidth)));
+
+ if (config & AICFG_IOS)
+ SI_ERROR(("iostatus 0x%x, width %d\n",
+ R_REG(osh, &ai->iostatus), R_REG(osh,
+ &ai->
+ iostatuswidth)));
+
+ if (config & AICFG_ERRL) {
+ SI_ERROR(("errlogctrl 0x%x, errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n", R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone), R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus)));
+ SI_ERROR(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr " "0x%x/0x%x\n", R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser), R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi), R_REG(osh, &ai->errlogaddrlo)));
+ }
+
+ if (verbose && (config & AICFG_OOB)) {
+ SI_ERROR(("oobselina30 0x%x, oobselina74 0x%x\n",
+ R_REG(osh, &ai->oobselina30), R_REG(osh,
+ &ai->
+ oobselina74)));
+ SI_ERROR(("oobselinb30 0x%x, oobselinb74 0x%x\n",
+ R_REG(osh, &ai->oobselinb30), R_REG(osh,
+ &ai->
+ oobselinb74)));
+ SI_ERROR(("oobselinc30 0x%x, oobselinc74 0x%x\n",
+ R_REG(osh, &ai->oobselinc30), R_REG(osh,
+ &ai->
+ oobselinc74)));
+ SI_ERROR(("oobselind30 0x%x, oobselind74 0x%x\n",
+ R_REG(osh, &ai->oobselind30), R_REG(osh,
+ &ai->
+ oobselind74)));
+ SI_ERROR(("oobselouta30 0x%x, oobselouta74 0x%x\n",
+ R_REG(osh, &ai->oobselouta30), R_REG(osh,
+ &ai->
+ oobselouta74)));
+ SI_ERROR(("oobseloutb30 0x%x, oobseloutb74 0x%x\n",
+ R_REG(osh, &ai->oobseloutb30), R_REG(osh,
+ &ai->
+ oobseloutb74)));
+ SI_ERROR(("oobseloutc30 0x%x, oobseloutc74 0x%x\n",
+ R_REG(osh, &ai->oobseloutc30), R_REG(osh,
+ &ai->
+ oobseloutc74)));
+ SI_ERROR(("oobseloutd30 0x%x, oobseloutd74 0x%x\n",
+ R_REG(osh, &ai->oobseloutd30), R_REG(osh,
+ &ai->
+ oobseloutd74)));
+ SI_ERROR(("oobsynca 0x%x, oobseloutaen 0x%x\n",
+ R_REG(osh, &ai->oobsynca), R_REG(osh,
+ &ai->oobseloutaen)));
+ SI_ERROR(("oobsyncb 0x%x, oobseloutben 0x%x\n",
+ R_REG(osh, &ai->oobsyncb), R_REG(osh,
+ &ai->oobseloutben)));
+ SI_ERROR(("oobsyncc 0x%x, oobseloutcen 0x%x\n",
+ R_REG(osh, &ai->oobsyncc), R_REG(osh,
+ &ai->oobseloutcen)));
+ SI_ERROR(("oobsyncd 0x%x, oobseloutden 0x%x\n",
+ R_REG(osh, &ai->oobsyncd), R_REG(osh,
+ &ai->oobseloutden)));
+ SI_ERROR(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n", R_REG(osh, &ai->oobaextwidth), R_REG(osh, &ai->oobainwidth), R_REG(osh, &ai->oobaoutwidth)));
+ SI_ERROR(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n", R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth), R_REG(osh, &ai->oobboutwidth)));
+ SI_ERROR(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n", R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth), R_REG(osh, &ai->oobcoutwidth)));
+ SI_ERROR(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n", R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth), R_REG(osh, &ai->oobdoutwidth)));
+ }
+}
+#endif /* BCMDBG */
diff --git a/drivers/staging/brcm80211/util/bcmotp.c b/drivers/staging/brcm80211/util/bcmotp.c
new file mode 100644
index 000000000000..c85cc6a9ced4
--- /dev/null
+++ b/drivers/staging/brcm80211/util/bcmotp.c
@@ -0,0 +1,954 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmdevs.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmendian.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <bcmotp.h>
+#include "siutils_priv.h"
+
+/*
+ * There are two different OTP controllers so far:
+ * 1. new IPX OTP controller: chipc 21, >=23
+ * 2. older HND OTP controller: chipc 12, 17, 22
+ *
+ * Define BCMHNDOTP to include support for the HND OTP controller.
+ * Define BCMIPXOTP to include support for the IPX OTP controller.
+ *
+ * NOTE 1: More than one may be defined
+ * NOTE 2: If none are defined, the default is to include them all.
+ */
+
+#if !defined(BCMHNDOTP) && !defined(BCMIPXOTP)
+#define BCMHNDOTP 1
+#define BCMIPXOTP 1
+#endif
+
+#define OTPTYPE_HND(ccrev) ((ccrev) < 21 || (ccrev) == 22)
+#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
+
+#define OTPP_TRIES 10000000 /* # of tries for OTPP */
+
+#ifdef BCMIPXOTP
+#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
+#endif
+
+/* OTP common function type */
+typedef int (*otp_status_t) (void *oh);
+typedef int (*otp_size_t) (void *oh);
+typedef void *(*otp_init_t) (si_t * sih);
+typedef uint16(*otp_read_bit_t) (void *oh, chipcregs_t * cc, uint off);
+typedef int (*otp_read_region_t) (si_t * sih, int region, uint16 * data,
+ uint * wlen);
+typedef int (*otp_nvread_t) (void *oh, char *data, uint * len);
+
+/* OTP function struct */
+typedef struct otp_fn_s {
+ otp_size_t size;
+ otp_read_bit_t read_bit;
+ otp_init_t init;
+ otp_read_region_t read_region;
+ otp_nvread_t nvread;
+ otp_status_t status;
+} otp_fn_t;
+
+typedef struct {
+ uint ccrev; /* chipc revision */
+ otp_fn_t *fn; /* OTP functions */
+ si_t *sih; /* Saved sb handle */
+ osl_t *osh;
+
+#ifdef BCMIPXOTP
+ /* IPX OTP section */
+ uint16 wsize; /* Size of otp in words */
+ uint16 rows; /* Geometry */
+ uint16 cols; /* Geometry */
+ uint32 status; /* Flag bits (lock/prog/rv).
+ * (Reflected only when OTP is power cycled)
+ */
+ uint16 hwbase; /* hardware subregion offset */
+ uint16 hwlim; /* hardware subregion boundary */
+ uint16 swbase; /* software subregion offset */
+ uint16 swlim; /* software subregion boundary */
+ uint16 fbase; /* fuse subregion offset */
+ uint16 flim; /* fuse subregion boundary */
+ int otpgu_base; /* offset to General Use Region */
+#endif /* BCMIPXOTP */
+
+#ifdef BCMHNDOTP
+ /* HND OTP section */
+ uint size; /* Size of otp in bytes */
+ uint hwprot; /* Hardware protection bits */
+ uint signvalid; /* Signature valid bits */
+ int boundary; /* hw/sw boundary */
+#endif /* BCMHNDOTP */
+} otpinfo_t;
+
+static otpinfo_t otpinfo;
+
+/*
+ * IPX OTP Code
+ *
+ * Exported functions:
+ * ipxotp_status()
+ * ipxotp_size()
+ * ipxotp_init()
+ * ipxotp_read_bit()
+ * ipxotp_read_region()
+ * ipxotp_nvread()
+ *
+ */
+
+#ifdef BCMIPXOTP
+
+#define HWSW_RGN(rgn) (((rgn) == OTP_HW_RGN) ? "h/w" : "s/w")
+
+/* OTP layout */
+/* CC revs 21, 24 and 27 OTP General Use Region word offset */
+#define REVA4_OTPGU_BASE 12
+
+/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
+#define REVB8_OTPGU_BASE 20
+
+/* CC rev 36 OTP General Use Region word offset */
+#define REV36_OTPGU_BASE 12
+
+/* Subregion word offsets in General Use region */
+#define OTPGU_HSB_OFF 0
+#define OTPGU_SFB_OFF 1
+#define OTPGU_CI_OFF 2
+#define OTPGU_P_OFF 3
+#define OTPGU_SROM_OFF 4
+
+/* Flag bit offsets in General Use region */
+#define OTPGU_HWP_OFF 60
+#define OTPGU_SWP_OFF 61
+#define OTPGU_CIP_OFF 62
+#define OTPGU_FUSEP_OFF 63
+#define OTPGU_CIP_MSK 0x4000
+#define OTPGU_P_MSK 0xf000
+#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
+
+/* OTP Size */
+#define OTP_SZ_FU_324 ((ROUNDUP(324,8))/8) /* 324 bits */
+#define OTP_SZ_FU_288 (288/8) /* 288 bits */
+#define OTP_SZ_FU_216 (216/8) /* 216 bits */
+#define OTP_SZ_FU_72 (72/8) /* 72 bits */
+#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
+#define OTP4315_SWREG_SZ 178 /* 178 bytes */
+#define OTP_SZ_FU_144 (144/8) /* 144 bits */
+
+static int ipxotp_status(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ return (int)(oi->status);
+}
+
+/* Return size in bytes */
+static int ipxotp_size(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ return (int)oi->wsize * 2;
+}
+
+static uint16 ipxotp_otpr(void *oh, chipcregs_t * cc, uint wn)
+{
+ otpinfo_t *oi;
+
+ oi = (otpinfo_t *) oh;
+
+ ASSERT(wn < oi->wsize);
+ ASSERT(cc != NULL);
+
+ return R_REG(oi->osh, &cc->sromotp[wn]);
+}
+
+static uint16 ipxotp_read_bit(void *oh, chipcregs_t * cc, uint off)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint k, row, col;
+ uint32 otpp, st;
+
+ row = off / oi->cols;
+ col = off % oi->cols;
+
+ otpp = OTPP_START_BUSY |
+ ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
+ ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
+ ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK);
+ W_REG(oi->osh, &cc->otpprog, otpp);
+
+ for (k = 0;
+ ((st = R_REG(oi->osh, &cc->otpprog)) & OTPP_START_BUSY)
+ && (k < OTPP_TRIES); k++) ;
+ if (k >= OTPP_TRIES) {
+ return 0xffff;
+ }
+ if (st & OTPP_READERR) {
+ return 0xffff;
+ }
+ st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
+
+ return (int)st;
+}
+
+/* Calculate max HW/SW region byte size by subtracting fuse region and checksum size,
+ * osizew is oi->wsize (OTP size - GU size) in words
+ */
+static int ipxotp_max_rgnsz(si_t * sih, int osizew)
+{
+ int ret = 0;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ case BCM4313_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ default:
+ ASSERT(0); /* Don't know about this chip */
+ }
+
+ return ret;
+}
+
+static void BCMNMIATTACHFN(_ipxotp_init) (otpinfo_t * oi, chipcregs_t * cc) {
+ uint k;
+ uint32 otpp, st;
+
+ /* record word offset of General Use Region for various chipcommon revs */
+ if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
+ || oi->sih->ccrev == 27) {
+ oi->otpgu_base = REVA4_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 36) {
+ /* OTP size greater than or equal to 2KB (128 words), otpgu_base is similar to rev23 */
+ if (oi->wsize >= 128)
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ else
+ oi->otpgu_base = REV36_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ }
+
+ /* First issue an init command so the status is up to date */
+ otpp =
+ OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
+
+ W_REG(oi->osh, &cc->otpprog, otpp);
+ for (k = 0;
+ ((st = R_REG(oi->osh, &cc->otpprog)) & OTPP_START_BUSY)
+ && (k < OTPP_TRIES); k++) ;
+ if (k >= OTPP_TRIES) {
+ return;
+ }
+
+ /* Read OTP lock bits and subregion programmed indication bits */
+ oi->status = R_REG(oi->osh, &cc->otpstatus);
+
+ if ((CHIPID(oi->sih->chip) == BCM43224_CHIP_ID)
+ || (CHIPID(oi->sih->chip) == BCM43225_CHIP_ID)) {
+ uint32 p_bits;
+ p_bits =
+ (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK)
+ >> OTPGU_P_SHIFT;
+ oi->status |= (p_bits << OTPS_GUP_SHIFT);
+ }
+
+ /*
+ * h/w region base and fuse region limit are fixed to the top and
+ * the bottom of the general use region. Everything else can be flexible.
+ */
+ oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
+ oi->hwlim = oi->wsize;
+ if (oi->status & OTPS_GUP_HW) {
+ oi->hwlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ oi->swbase = oi->hwlim;
+ } else
+ oi->swbase = oi->hwbase;
+
+ /* subtract fuse and checksum from beginning */
+ oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
+
+ if (oi->status & OTPS_GUP_SW) {
+ oi->swlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ oi->fbase = oi->swlim;
+ } else
+ oi->fbase = oi->swbase;
+
+ oi->flim = oi->wsize;
+}
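+
+/*
+ * Resulting IPX OTP word-offset layout (a sketch of the bounds computed
+ * above; the exact boundaries move depending on which subregions are
+ * programmed):
+ *
+ *	otpgu_base .. hwbase-1	general use flags / subregion boundary words
+ *	hwbase     .. hwlim-1	h/w subregion
+ *	swbase     .. swlim-1	s/w subregion
+ *	fbase      .. flim-1	fuse subregion (flim == wsize)
+ */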
+
+static void *BCMNMIATTACHFN(ipxotp_init) (si_t * sih) {
+ uint idx;
+ chipcregs_t *cc;
+ otpinfo_t *oi;
+
+ /* Make sure we're running IPX OTP */
+ ASSERT(OTPTYPE_IPX(sih->ccrev));
+ if (!OTPTYPE_IPX(sih->ccrev))
+ return NULL;
+
+ /* Make sure OTP is not disabled */
+ if (si_is_otp_disabled(sih)) {
+ return NULL;
+ }
+
+ /* Make sure OTP is powered up */
+ if (!si_is_otp_powered(sih)) {
+ return NULL;
+ }
+
+ oi = &otpinfo;
+
+ /* Check for otp size */
+ switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ case 0:
+ /* Nothing there */
+ return NULL;
+ case 1: /* 32x64 */
+ oi->rows = 32;
+ oi->cols = 64;
+ oi->wsize = 128;
+ break;
+ case 2: /* 64x64 */
+ oi->rows = 64;
+ oi->cols = 64;
+ oi->wsize = 256;
+ break;
+ case 5: /* 96x64 */
+ oi->rows = 96;
+ oi->cols = 64;
+ oi->wsize = 384;
+ break;
+ case 7: /* 16x64 *//* 1024 bits */
+ oi->rows = 16;
+ oi->cols = 64;
+ oi->wsize = 64;
+ break;
+ default:
+ /* Don't know the geometry */
+ return NULL;
+ }
+
+ /* Retrieve OTP region info */
+ idx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ _ipxotp_init(oi, cc);
+
+ si_setcoreidx(sih, idx);
+
+ return (void *)oi;
+}
+
+static int ipxotp_read_region(void *oh, int region, uint16 * data, uint * wlen)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint idx;
+ chipcregs_t *cc;
+ uint base, i, sz;
+
+ /* Validate region selection */
+ switch (region) {
+ case OTP_HW_RGN:
+ sz = (uint) oi->hwlim - oi->hwbase;
+ if (!(oi->status & OTPS_GUP_HW)) {
+ *wlen = sz;
+ return BCME_NOTFOUND;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return BCME_BUFTOOSHORT;
+ }
+ base = oi->hwbase;
+ break;
+ case OTP_SW_RGN:
+ sz = ((uint) oi->swlim - oi->swbase);
+ if (!(oi->status & OTPS_GUP_SW)) {
+ *wlen = sz;
+ return BCME_NOTFOUND;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return BCME_BUFTOOSHORT;
+ }
+ base = oi->swbase;
+ break;
+ case OTP_CI_RGN:
+ sz = OTPGU_CI_SZ;
+ if (!(oi->status & OTPS_GUP_CI)) {
+ *wlen = sz;
+ return BCME_NOTFOUND;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return BCME_BUFTOOSHORT;
+ }
+ base = oi->otpgu_base + OTPGU_CI_OFF;
+ break;
+ case OTP_FUSE_RGN:
+ sz = (uint) oi->flim - oi->fbase;
+ if (!(oi->status & OTPS_GUP_FUSE)) {
+ *wlen = sz;
+ return BCME_NOTFOUND;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return BCME_BUFTOOSHORT;
+ }
+ base = oi->fbase;
+ break;
+ case OTP_ALL_RGN:
+ sz = ((uint) oi->flim - oi->hwbase);
+ if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
+ *wlen = sz;
+ return BCME_NOTFOUND;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return BCME_BUFTOOSHORT;
+ }
+ base = oi->hwbase;
+ break;
+ default:
+ return BCME_BADARG;
+ }
+
+ idx = si_coreidx(oi->sih);
+ cc = si_setcoreidx(oi->sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ /* Read the data */
+ for (i = 0; i < sz; i++)
+ data[i] = ipxotp_otpr(oh, cc, base + i);
+
+ si_setcoreidx(oi->sih, idx);
+ *wlen = sz;
+ return 0;
+}
+
+static int ipxotp_nvread(void *oh, char *data, uint * len)
+{
+ return BCME_UNSUPPORTED;
+}
+
+static otp_fn_t ipxotp_fn = {
+ (otp_size_t) ipxotp_size,
+ (otp_read_bit_t) ipxotp_read_bit,
+
+ (otp_init_t) ipxotp_init,
+ (otp_read_region_t) ipxotp_read_region,
+ (otp_nvread_t) ipxotp_nvread,
+
+ (otp_status_t) ipxotp_status
+};
+
+#endif /* BCMIPXOTP */
+
+/*
+ * HND OTP Code
+ *
+ * Exported functions:
+ * hndotp_status()
+ * hndotp_size()
+ * hndotp_init()
+ * hndotp_read_bit()
+ * hndotp_read_region()
+ * hndotp_nvread()
+ *
+ */
+
+#ifdef BCMHNDOTP
+
+/* Fields in otpstatus */
+#define OTPS_PROGFAIL 0x80000000
+#define OTPS_PROTECT 0x00000007
+#define OTPS_HW_PROTECT 0x00000001
+#define OTPS_SW_PROTECT 0x00000002
+#define OTPS_CID_PROTECT 0x00000004
+#define OTPS_RCEV_MSK 0x00003f00
+#define OTPS_RCEV_SHIFT 8
+
+/* Fields in the otpcontrol register */
+#define OTPC_RECWAIT 0xff000000
+#define OTPC_PROGWAIT 0x00ffff00
+#define OTPC_PRW_SHIFT 8
+#define OTPC_MAXFAIL 0x00000038
+#define OTPC_VSEL 0x00000006
+#define OTPC_SELVL 0x00000001
+
+/* OTP regions (Word offsets from otp size) */
+#define OTP_SWLIM_OFF (-4)
+#define OTP_CIDBASE_OFF 0
+#define OTP_CIDLIM_OFF 4
+
+/* Predefined OTP words (Word offset from otp size) */
+#define OTP_BOUNDARY_OFF (-4)
+#define OTP_HWSIGN_OFF (-3)
+#define OTP_SWSIGN_OFF (-2)
+#define OTP_CIDSIGN_OFF (-1)
+#define OTP_CID_OFF 0
+#define OTP_PKG_OFF 1
+#define OTP_FID_OFF 2
+#define OTP_RSV_OFF 3
+#define OTP_LIM_OFF 4
+#define OTP_RD_OFF 4 /* Redundancy row starts here */
+#define OTP_RC0_OFF 28 /* Redundancy control word 1 */
+#define OTP_RC1_OFF 32 /* Redundancy control word 2 */
+#define OTP_RC_LIM_OFF 36 /* Redundancy control word end */
+
+#define OTP_HW_REGION OTPS_HW_PROTECT
+#define OTP_SW_REGION OTPS_SW_PROTECT
+#define OTP_CID_REGION OTPS_CID_PROTECT
+
+#if OTP_HW_REGION != OTP_HW_RGN
+#error "incompatible OTP_HW_RGN"
+#endif
+#if OTP_SW_REGION != OTP_SW_RGN
+#error "incompatible OTP_SW_RGN"
+#endif
+#if OTP_CID_REGION != OTP_CI_RGN
+#error "incompatible OTP_CI_RGN"
+#endif
+
+/* Redundancy entry definitions */
+#define OTP_RCE_ROW_SZ 6
+#define OTP_RCE_SIGN_MASK 0x7fff
+#define OTP_RCE_ROW_MASK 0x3f
+#define OTP_RCE_BITS 21
+#define OTP_RCE_SIGN_SZ 15
+#define OTP_RCE_BIT0 1
+
+#define OTP_WPR 4
+#define OTP_SIGNATURE 0x578a
+#define OTP_MAGIC 0x4e56
+
+static int hndotp_status(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ return ((int)(oi->hwprot | oi->signvalid));
+}
+
+static int hndotp_size(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ return ((int)(oi->size));
+}
+
+static uint16 hndotp_otpr(void *oh, chipcregs_t * cc, uint wn)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ osl_t *osh;
+ volatile uint16 *ptr;
+
+ ASSERT(wn < ((oi->size / 2) + OTP_RC_LIM_OFF));
+ ASSERT(cc != NULL);
+
+ osh = si_osh(oi->sih);
+
+ ptr = (volatile uint16 *)((volatile char *)cc + CC_SROM_OTP);
+ return (R_REG(osh, &ptr[wn]));
+}
+
+static uint16 hndotp_otproff(void *oh, chipcregs_t * cc, int woff)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ osl_t *osh;
+ volatile uint16 *ptr;
+
+ ASSERT(woff >= (-((int)oi->size / 2)));
+ ASSERT(woff < OTP_LIM_OFF);
+ ASSERT(cc != NULL);
+
+ osh = si_osh(oi->sih);
+
+ ptr = (volatile uint16 *)((volatile char *)cc + CC_SROM_OTP);
+
+ return (R_REG(osh, &ptr[(oi->size / 2) + woff]));
+}
+
+static uint16 hndotp_read_bit(void *oh, chipcregs_t * cc, uint idx)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint k, row, col;
+ uint32 otpp, st;
+ osl_t *osh;
+
+ osh = si_osh(oi->sih);
+ row = idx / 65;
+ col = idx % 65;
+
+ otpp = OTPP_START_BUSY | OTPP_READ |
+ ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) | (col & OTPP_COL_MASK);
+
+ W_REG(osh, &cc->otpprog, otpp);
+ st = R_REG(osh, &cc->otpprog);
+ for (k = 0;
+ ((st & OTPP_START_BUSY) == OTPP_START_BUSY) && (k < OTPP_TRIES);
+ k++)
+ st = R_REG(osh, &cc->otpprog);
+
+ if (k >= OTPP_TRIES) {
+ return 0xffff;
+ }
+ if (st & OTPP_READERR) {
+ return 0xffff;
+ }
+ st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
+ return (uint16) st;
+}
+
+static void *BCMNMIATTACHFN(hndotp_init) (si_t * sih) {
+ uint idx;
+ chipcregs_t *cc;
+ otpinfo_t *oi;
+ uint32 cap = 0, clkdiv, otpdiv = 0;
+ void *ret = NULL;
+ osl_t *osh;
+
+ oi = &otpinfo;
+
+ idx = si_coreidx(sih);
+ osh = si_osh(oi->sih);
+
+ /* Check for otp */
+ if ((cc = si_setcoreidx(sih, SI_CC_IDX)) != NULL) {
+ cap = R_REG(osh, &cc->capabilities);
+ if ((cap & CC_CAP_OTPSIZE) == 0) {
+ /* Nothing there */
+ goto out;
+ }
+
+ /* As of right now, support only 4320a2, 4311a1 and 4312 */
+ ASSERT((oi->ccrev == 12) || (oi->ccrev == 17)
+ || (oi->ccrev == 22));
+ if (!
+ ((oi->ccrev == 12) || (oi->ccrev == 17)
+ || (oi->ccrev == 22)))
+ return NULL;
+
+ /* Read the OTP byte size. chipcommon rev >= 18 has RCE so the size is
+ * 8 rows (64 bytes) smaller
+ */
+ oi->size =
+ 1 << (((cap & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT)
+ + CC_CAP_OTPSIZE_BASE);
+ if (oi->ccrev >= 18)
+ oi->size -= ((OTP_RC0_OFF - OTP_BOUNDARY_OFF) * 2);
+
+ oi->hwprot = (int)(R_REG(osh, &cc->otpstatus) & OTPS_PROTECT);
+ oi->boundary = -1;
+
+ /* Check the region signature */
+ if (hndotp_otproff(oi, cc, OTP_HWSIGN_OFF) == OTP_SIGNATURE) {
+ oi->signvalid |= OTP_HW_REGION;
+ oi->boundary = hndotp_otproff(oi, cc, OTP_BOUNDARY_OFF);
+ }
+
+ if (hndotp_otproff(oi, cc, OTP_SWSIGN_OFF) == OTP_SIGNATURE)
+ oi->signvalid |= OTP_SW_REGION;
+
+ if (hndotp_otproff(oi, cc, OTP_CIDSIGN_OFF) == OTP_SIGNATURE)
+ oi->signvalid |= OTP_CID_REGION;
+
+ /* Set OTP clkdiv for stability */
+ if (oi->ccrev == 22)
+ otpdiv = 12;
+
+ if (otpdiv) {
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ clkdiv =
+ (clkdiv & ~CLKD_OTP) | (otpdiv << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ }
+ OSL_DELAY(10);
+
+ ret = (void *)oi;
+ }
+
+ out: /* All done */
+ si_setcoreidx(sih, idx);
+
+ return ret;
+}
+
+static int hndotp_read_region(void *oh, int region, uint16 * data, uint * wlen)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint32 idx, st;
+ chipcregs_t *cc;
+ int i;
+
+ /* Only support HW region (no active chips use HND OTP SW region) */
+ ASSERT(region == OTP_HW_REGION);
+
+ /* Region empty? */
+ st = oi->hwprot | oi->signvalid;
+ if ((st & region) == 0)
+ return BCME_NOTFOUND;
+
+ *wlen =
+ ((int)*wlen < oi->boundary / 2) ? *wlen : (uint) oi->boundary / 2;
+
+ idx = si_coreidx(oi->sih);
+ cc = si_setcoreidx(oi->sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ for (i = 0; i < (int)*wlen; i++)
+ data[i] = hndotp_otpr(oh, cc, i);
+
+ si_setcoreidx(oi->sih, idx);
+
+ return 0;
+}
+
+static int hndotp_nvread(void *oh, char *data, uint * len)
+{
+ int rc = 0;
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint32 base, bound, lim = 0, st;
+ int i, chunk, gchunks, tsz = 0;
+ uint32 idx;
+ chipcregs_t *cc;
+ uint offset;
+ uint16 *rawotp = NULL;
+
+ /* save the orig core */
+ idx = si_coreidx(oi->sih);
+ cc = si_setcoreidx(oi->sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ st = hndotp_status(oh);
+ if (!(st & (OTP_HW_REGION | OTP_SW_REGION))) {
+ rc = -1;
+ goto out;
+ }
+
+ /* Read the whole otp so we can easily manipulate it */
+ lim = hndotp_size(oh);
+ if ((rawotp = MALLOC(si_osh(oi->sih), lim)) == NULL) {
+ rc = -2;
+ goto out;
+ }
+ for (i = 0; i < (int)(lim / 2); i++)
+ rawotp[i] = hndotp_otpr(oh, cc, i);
+
+ if ((st & OTP_HW_REGION) == 0) {
+ /* This could be a programming failure in the first
+ * chunk followed by one or more good chunks
+ */
+ for (i = 0; i < (int)(lim / 2); i++)
+ if (rawotp[i] == OTP_MAGIC)
+ break;
+
+ if (i < (int)(lim / 2)) {
+ base = i;
+ bound = (i * 2) + rawotp[i + 1];
+ } else {
+ rc = -3;
+ goto out;
+ }
+ } else {
+ bound = rawotp[(lim / 2) + OTP_BOUNDARY_OFF];
+
+ /* There are two cases: 1) The whole otp is used as nvram
+ * and 2) There is a hardware header followed by nvram.
+ */
+ if (rawotp[0] == OTP_MAGIC) {
+ base = 0;
+ } else
+ base = bound;
+ }
+
+ /* Find and copy the data */
+
+ chunk = 0;
+ gchunks = 0;
+ i = base / 2;
+ offset = 0;
+ while ((i < (int)(lim / 2)) && (rawotp[i] == OTP_MAGIC)) {
+ int dsz, rsz = rawotp[i + 1];
+
+ if (((i * 2) + rsz) >= (int)lim) {
+ /* Bad length, try to find another chunk anyway */
+ rsz = 6;
+ }
+ if (hndcrc16((uint8 *) & rawotp[i], rsz,
+ CRC16_INIT_VALUE) == CRC16_GOOD_VALUE) {
+ /* Good crc, copy the vars */
+ gchunks++;
+ dsz = rsz - 6;
+ tsz += dsz;
+ if (offset + dsz >= *len) {
+ goto out;
+ }
+ bcopy((char *)&rawotp[i + 2], &data[offset], dsz);
+ offset += dsz;
+ /* Remove extra null characters at the end */
+ while (offset > 1 &&
+ data[offset - 1] == 0 && data[offset - 2] == 0)
+ offset--;
+ i += rsz / 2;
+ } else {
+ /* bad length or crc didn't check, try to find the next set */
+ if (rawotp[i + (rsz / 2)] == OTP_MAGIC) {
+ /* Assume length is good */
+ i += rsz / 2;
+ } else {
+ while (++i < (int)(lim / 2))
+ if (rawotp[i] == OTP_MAGIC)
+ break;
+ }
+ }
+ chunk++;
+ }
+
+ *len = offset;
+
+ out:
+ if (rawotp)
+ MFREE(si_osh(oi->sih), rawotp, lim);
+ si_setcoreidx(oi->sih, idx);
+
+ return rc;
+}
+
+static otp_fn_t hndotp_fn = {
+ (otp_size_t) hndotp_size,
+ (otp_read_bit_t) hndotp_read_bit,
+
+ (otp_init_t) hndotp_init,
+ (otp_read_region_t) hndotp_read_region,
+ (otp_nvread_t) hndotp_nvread,
+
+ (otp_status_t) hndotp_status
+};
+
+#endif /* BCMHNDOTP */
+
+/*
+ * Common Code: Compiled for IPX / HND / AUTO
+ * otp_status()
+ * otp_size()
+ * otp_read_bit()
+ * otp_init()
+ * otp_read_region()
+ * otp_nvread()
+ */
+
+int otp_status(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+
+ return oi->fn->status(oh);
+}
+
+int otp_size(void *oh)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+
+ return oi->fn->size(oh);
+}
+
+uint16 otp_read_bit(void *oh, uint offset)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+ uint idx = si_coreidx(oi->sih);
+ chipcregs_t *cc = si_setcoreidx(oi->sih, SI_CC_IDX);
+ uint16 readBit = (uint16) oi->fn->read_bit(oh, cc, offset);
+ si_setcoreidx(oi->sih, idx);
+ return readBit;
+}
+
+void *BCMNMIATTACHFN(otp_init) (si_t * sih) {
+ otpinfo_t *oi;
+ void *ret = NULL;
+
+ oi = &otpinfo;
+ bzero(oi, sizeof(otpinfo_t));
+
+ oi->ccrev = sih->ccrev;
+
+#ifdef BCMIPXOTP
+ if (OTPTYPE_IPX(oi->ccrev))
+ oi->fn = &ipxotp_fn;
+#endif
+
+#ifdef BCMHNDOTP
+ if (OTPTYPE_HND(oi->ccrev))
+ oi->fn = &hndotp_fn;
+#endif
+
+ if (oi->fn == NULL) {
+ return NULL;
+ }
+
+ oi->sih = sih;
+ oi->osh = si_osh(oi->sih);
+
+ ret = (oi->fn->init) (sih);
+
+ return ret;
+}
+
+int
+BCMNMIATTACHFN(otp_read_region) (si_t * sih, int region, uint16 * data,
+ uint * wlen) {
+ bool wasup = FALSE;
+ void *oh;
+ int err = 0;
+
+ if (!(wasup = si_is_otp_powered(sih)))
+ si_otp_power(sih, TRUE);
+
+ if (!si_is_otp_powered(sih) || si_is_otp_disabled(sih)) {
+ err = BCME_NOTREADY;
+ goto out;
+ }
+
+ oh = otp_init(sih);
+ if (oh == NULL) {
+ err = BCME_ERROR;
+ goto out;
+ }
+
+ err = (((otpinfo_t *) oh)->fn->read_region) (oh, region, data, wlen);
+
+ out:
+ if (!wasup)
+ si_otp_power(sih, FALSE);
+
+ return err;
+}
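+
+/*
+ * Usage sketch (MAXWORDS is a caller-chosen, hypothetical buffer size):
+ *
+ *	uint16 buf[MAXWORDS];
+ *	uint wlen = MAXWORDS;
+ *	if (otp_read_region(sih, OTP_HW_RGN, buf, &wlen) == 0)
+ *		;	/* wlen words of h/w subregion data are in buf */
+ *
+ * On BCME_BUFTOOSHORT the required word count is returned in wlen.
+ */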
+
+int otp_nvread(void *oh, char *data, uint * len)
+{
+ otpinfo_t *oi = (otpinfo_t *) oh;
+
+ return oi->fn->nvread(oh, data, len);
+}
diff --git a/drivers/staging/brcm80211/util/bcmsrom.c b/drivers/staging/brcm80211/util/bcmsrom.c
new file mode 100644
index 000000000000..d5527f75f62c
--- /dev/null
+++ b/drivers/staging/brcm80211/util/bcmsrom.c
@@ -0,0 +1,2081 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <stdarg.h>
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <pcicfg.h>
+#include <siutils.h>
+#include <bcmsrom.h>
+#include <bcmsrom_tbl.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#endif
+
+#include <bcmnvram.h>
+#include <bcmotp.h>
+
+#if defined(BCMSDIO)
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#endif
+
+#include <proto/ethernet.h> /* for sprom content grokking */
+
+#define BS_ERROR(args)
+
+#define SROM_OFFSET(sih) ((sih->ccrev > 31) ? \
+ (((sih->cccaps & CC_CAP_SROM) == 0) ? NULL : \
+ ((uint8 *)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP)) : \
+ ((uint8 *)curmap + PCI_BAR0_SPROM_OFFSET))
+
+#if defined(BCMDBG)
+#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */
+#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */
+#endif
+
+typedef struct varbuf {
+ char *base; /* pointer to buffer base */
+ char *buf; /* pointer to current position */
+ unsigned int size; /* current (residual) size in bytes */
+} varbuf_t;
+extern char *_vars;
+extern uint _varsz;
+
+#define SROM_CIS_SINGLE 1
+
+static int initvars_srom_si(si_t * sih, osl_t * osh, void *curmap, char **vars,
+ uint * count);
+static void _initvars_srom_pci(uint8 sromrev, uint16 * srom, uint off,
+ varbuf_t * b);
+static int initvars_srom_pci(si_t * sih, void *curmap, char **vars,
+ uint * count);
+static int initvars_flash_si(si_t * sih, char **vars, uint * count);
+#ifdef BCMSDIO
+static int initvars_cis_sdio(osl_t * osh, char **vars, uint * count);
+static int sprom_cmd_sdio(osl_t * osh, uint8 cmd);
+static int sprom_read_sdio(osl_t * osh, uint16 addr, uint16 * data);
+#endif /* BCMSDIO */
+static int sprom_read_pci(osl_t * osh, si_t * sih, uint16 * sprom, uint wordoff,
+ uint16 * buf, uint nwords, bool check_crc);
+#if defined(BCMNVRAMR)
+static int otp_read_pci(osl_t * osh, si_t * sih, uint16 * buf, uint bufsz);
+#endif
+static uint16 srom_cc_cmd(si_t * sih, osl_t * osh, void *ccregs, uint32 cmd,
+ uint wordoff, uint16 data);
+
+static int initvars_table(osl_t * osh, char *start, char *end, char **vars,
+ uint * count);
+static int initvars_flash(si_t * sih, osl_t * osh, char **vp, uint len);
+
+/* Initialization of varbuf structure */
+static void BCMATTACHFN(varbuf_init) (varbuf_t * b, char *buf, uint size) {
+ b->size = size;
+ b->base = b->buf = buf;
+}
+
+/* append a null terminated var=value string */
+static int BCMATTACHFN(varbuf_append) (varbuf_t * b, const char *fmt, ...) {
+ va_list ap;
+ int r;
+ size_t len;
+ char *s;
+
+ if (b->size < 2)
+ return 0;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+ va_end(ap);
+
+ /* C99 snprintf behavior returns r >= size on overflow,
+ * others return -1 on overflow.
+ * All return -1 on format error.
+ * We need to leave room for 2 null terminations, one for the current var
+ * string, and one for the final null of the var table. So check that the
+ * strlen written, r, leaves room for 2 chars.
+ */
+ if ((r == -1) || (r > (int)(b->size - 2))) {
+ b->size = 0;
+ return 0;
+ }
+
+ /* Remove any earlier occurrence of the same variable */
+ if ((s = strchr(b->buf, '=')) != NULL) {
+ len = (size_t) (s - b->buf);
+ for (s = b->base; s < b->buf;) {
+ if ((bcmp(s, b->buf, len) == 0) && s[len] == '=') {
+ len = strlen(s) + 1;
+ memmove(s, (s + len),
+ ((b->buf + r + 1) - (s + len)));
+ b->buf -= len;
+ b->size += (unsigned int)len;
+ break;
+ }
+
+ while (*s++) ;
+ }
+ }
+
+ /* skip over this string's null termination */
+ r++;
+ b->size -= r;
+ b->buf += r;
+
+ return r;
+}
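+
+/*
+ * Usage sketch: the initvars/parsecis routines below build a
+ * "name=value\0name=value\0...\0" table with these helpers and then hand
+ * it to initvars_table(), roughly (argument values are placeholders):
+ *
+ *	varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
+ *	varbuf_append(&b, vstr_boardrev, boardrev);
+ *	varbuf_append(&b, vstr_end);
+ *	err = initvars_table(osh, base, b.buf, vars, count);
+ */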
+
+/*
+ * Initialize local vars from the right source for this platform.
+ * Return 0 on success, nonzero on error.
+ */
+int
+BCMATTACHFN(srom_var_init) (si_t * sih, uint bustype, void *curmap, osl_t * osh,
+ char **vars, uint * count) {
+ uint len;
+
+ len = 0;
+
+ ASSERT(bustype == BUSTYPE(bustype));
+ if (vars == NULL || count == NULL)
+ return (0);
+
+ *vars = NULL;
+ *count = 0;
+
+ switch (BUSTYPE(bustype)) {
+ case SI_BUS:
+ case JTAG_BUS:
+ return initvars_srom_si(sih, osh, curmap, vars, count);
+
+ case PCI_BUS:
+ ASSERT(curmap != NULL);
+ if (curmap == NULL)
+ return (-1);
+
+ return initvars_srom_pci(sih, curmap, vars, count);
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ return initvars_cis_sdio(osh, vars, count);
+#endif /* BCMSDIO */
+
+ default:
+ ASSERT(0);
+ }
+ return (-1);
+}
+
+/* support only 16-bit word read from srom */
+int
+srom_read(si_t * sih, uint bustype, void *curmap, osl_t * osh,
+ uint byteoff, uint nbytes, uint16 * buf, bool check_crc)
+{
+ uint off, nw;
+#ifdef BCMSDIO
+ uint i;
+#endif /* BCMSDIO */
+
+ ASSERT(bustype == BUSTYPE(bustype));
+
+ /* check input - 16-bit access only */
+ if (byteoff & 1 || nbytes & 1 || (byteoff + nbytes) > SROM_MAX)
+ return 1;
+
+ off = byteoff / 2;
+ nw = nbytes / 2;
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (!curmap)
+ return 1;
+
+ if (si_is_sprom_available(sih)) {
+ uint16 *srom;
+
+ srom = (uint16 *) SROM_OFFSET(sih);
+ if (srom == NULL)
+ return 1;
+
+ if (sprom_read_pci
+ (osh, sih, srom, off, buf, nw, check_crc))
+ return 1;
+ }
+#if defined(BCMNVRAMR)
+ else {
+ if (otp_read_pci(osh, sih, buf, SROM_MAX))
+ return 1;
+ }
+#endif
+#ifdef BCMSDIO
+ } else if (BUSTYPE(bustype) == SDIO_BUS) {
+ off = byteoff / 2;
+ nw = nbytes / 2;
+ for (i = 0; i < nw; i++) {
+ if (sprom_read_sdio
+ (osh, (uint16) (off + i), (uint16 *) (buf + i)))
+ return 1;
+ }
+#endif /* BCMSDIO */
+ } else if (BUSTYPE(bustype) == SI_BUS) {
+ return 1;
+ } else {
+ return 1;
+ }
+
+ return 0;
+}
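+
+/*
+ * Usage sketch (64 words is an arbitrary illustration size): read the
+ * first 128 bytes of the SPROM over PCI with CRC checking enabled:
+ *
+ *	uint16 buf[64];
+ *	if (srom_read(sih, PCI_BUS, curmap, osh, 0, sizeof(buf), buf, TRUE))
+ *		;	/* read failed or CRC mismatch */
+ */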
+
+static const char BCMATTACHDATA(vstr_manf)[] = "manf=%s";
+static const char BCMATTACHDATA(vstr_productname)[] = "productname=%s";
+static const char BCMATTACHDATA(vstr_manfid)[] = "manfid=0x%x";
+static const char BCMATTACHDATA(vstr_prodid)[] = "prodid=0x%x";
+#ifdef BCMSDIO
+static const char BCMATTACHDATA(vstr_sdmaxspeed)[] = "sdmaxspeed=%d";
+static const char BCMATTACHDATA(vstr_sdmaxblk)[][13] =
+{
+"sdmaxblk0=%d", "sdmaxblk1=%d", "sdmaxblk2=%d"};
+#endif
+static const char BCMATTACHDATA(vstr_regwindowsz)[] = "regwindowsz=%d";
+static const char BCMATTACHDATA(vstr_sromrev)[] = "sromrev=%d";
+static const char BCMATTACHDATA(vstr_chiprev)[] = "chiprev=%d";
+static const char BCMATTACHDATA(vstr_subvendid)[] = "subvendid=0x%x";
+static const char BCMATTACHDATA(vstr_subdevid)[] = "subdevid=0x%x";
+static const char BCMATTACHDATA(vstr_boardrev)[] = "boardrev=0x%x";
+static const char BCMATTACHDATA(vstr_aa2g)[] = "aa2g=0x%x";
+static const char BCMATTACHDATA(vstr_aa5g)[] = "aa5g=0x%x";
+static const char BCMATTACHDATA(vstr_ag)[] = "ag%d=0x%x";
+static const char BCMATTACHDATA(vstr_cc)[] = "cc=%d";
+static const char BCMATTACHDATA(vstr_opo)[] = "opo=%d";
+static const char BCMATTACHDATA(vstr_pa0b)[][9] =
+{
+"pa0b0=%d", "pa0b1=%d", "pa0b2=%d"};
+
+static const char BCMATTACHDATA(vstr_pa0itssit)[] = "pa0itssit=%d";
+static const char BCMATTACHDATA(vstr_pa0maxpwr)[] = "pa0maxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1b)[][9] =
+{
+"pa1b0=%d", "pa1b1=%d", "pa1b2=%d"};
+
+static const char BCMATTACHDATA(vstr_pa1lob)[][11] =
+{
+"pa1lob0=%d", "pa1lob1=%d", "pa1lob2=%d"};
+
+static const char BCMATTACHDATA(vstr_pa1hib)[][11] =
+{
+"pa1hib0=%d", "pa1hib1=%d", "pa1hib2=%d"};
+
+static const char BCMATTACHDATA(vstr_pa1itssit)[] = "pa1itssit=%d";
+static const char BCMATTACHDATA(vstr_pa1maxpwr)[] = "pa1maxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1lomaxpwr)[] = "pa1lomaxpwr=%d";
+static const char BCMATTACHDATA(vstr_pa1himaxpwr)[] = "pa1himaxpwr=%d";
+static const char BCMATTACHDATA(vstr_oem)[] =
+ "oem=%02x%02x%02x%02x%02x%02x%02x%02x";
+static const char BCMATTACHDATA(vstr_boardflags)[] = "boardflags=0x%x";
+static const char BCMATTACHDATA(vstr_boardflags2)[] = "boardflags2=0x%x";
+static const char BCMATTACHDATA(vstr_ledbh)[] = "ledbh%d=0x%x";
+static const char BCMATTACHDATA(vstr_noccode)[] = "ccode=0x0";
+static const char BCMATTACHDATA(vstr_ccode)[] = "ccode=%c%c";
+static const char BCMATTACHDATA(vstr_cctl)[] = "cctl=0x%x";
+static const char BCMATTACHDATA(vstr_cckpo)[] = "cckpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdmpo)[] = "ofdmpo=0x%x";
+static const char BCMATTACHDATA(vstr_rdlid)[] = "rdlid=0x%x";
+static const char BCMATTACHDATA(vstr_rdlrndis)[] = "rdlrndis=%d";
+static const char BCMATTACHDATA(vstr_rdlrwu)[] = "rdlrwu=%d";
+static const char BCMATTACHDATA(vstr_usbfs)[] = "usbfs=%d";
+static const char BCMATTACHDATA(vstr_wpsgpio)[] = "wpsgpio=%d";
+static const char BCMATTACHDATA(vstr_wpsled)[] = "wpsled=%d";
+static const char BCMATTACHDATA(vstr_rdlsn)[] = "rdlsn=%d";
+static const char BCMATTACHDATA(vstr_rssismf2g)[] = "rssismf2g=%d";
+static const char BCMATTACHDATA(vstr_rssismc2g)[] = "rssismc2g=%d";
+static const char BCMATTACHDATA(vstr_rssisav2g)[] = "rssisav2g=%d";
+static const char BCMATTACHDATA(vstr_bxa2g)[] = "bxa2g=%d";
+static const char BCMATTACHDATA(vstr_rssismf5g)[] = "rssismf5g=%d";
+static const char BCMATTACHDATA(vstr_rssismc5g)[] = "rssismc5g=%d";
+static const char BCMATTACHDATA(vstr_rssisav5g)[] = "rssisav5g=%d";
+static const char BCMATTACHDATA(vstr_bxa5g)[] = "bxa5g=%d";
+static const char BCMATTACHDATA(vstr_tri2g)[] = "tri2g=%d";
+static const char BCMATTACHDATA(vstr_tri5gl)[] = "tri5gl=%d";
+static const char BCMATTACHDATA(vstr_tri5g)[] = "tri5g=%d";
+static const char BCMATTACHDATA(vstr_tri5gh)[] = "tri5gh=%d";
+static const char BCMATTACHDATA(vstr_rxpo2g)[] = "rxpo2g=%d";
+static const char BCMATTACHDATA(vstr_rxpo5g)[] = "rxpo5g=%d";
+static const char BCMATTACHDATA(vstr_boardtype)[] = "boardtype=0x%x";
+static const char BCMATTACHDATA(vstr_leddc)[] = "leddc=0x%04x";
+static const char BCMATTACHDATA(vstr_vendid)[] = "vendid=0x%x";
+static const char BCMATTACHDATA(vstr_devid)[] = "devid=0x%x";
+static const char BCMATTACHDATA(vstr_xtalfreq)[] = "xtalfreq=%d";
+static const char BCMATTACHDATA(vstr_txchain)[] = "txchain=0x%x";
+static const char BCMATTACHDATA(vstr_rxchain)[] = "rxchain=0x%x";
+static const char BCMATTACHDATA(vstr_antswitch)[] = "antswitch=0x%x";
+static const char BCMATTACHDATA(vstr_regrev)[] = "regrev=0x%x";
+static const char BCMATTACHDATA(vstr_antswctl2g)[] = "antswctl2g=0x%x";
+static const char BCMATTACHDATA(vstr_triso2g)[] = "triso2g=0x%x";
+static const char BCMATTACHDATA(vstr_pdetrange2g)[] = "pdetrange2g=0x%x";
+static const char BCMATTACHDATA(vstr_extpagain2g)[] = "extpagain2g=0x%x";
+static const char BCMATTACHDATA(vstr_tssipos2g)[] = "tssipos2g=0x%x";
+static const char BCMATTACHDATA(vstr_antswctl5g)[] = "antswctl5g=0x%x";
+static const char BCMATTACHDATA(vstr_triso5g)[] = "triso5g=0x%x";
+static const char BCMATTACHDATA(vstr_pdetrange5g)[] = "pdetrange5g=0x%x";
+static const char BCMATTACHDATA(vstr_extpagain5g)[] = "extpagain5g=0x%x";
+static const char BCMATTACHDATA(vstr_tssipos5g)[] = "tssipos5g=0x%x";
+static const char BCMATTACHDATA(vstr_maxp2ga0)[] = "maxp2ga0=0x%x";
+static const char BCMATTACHDATA(vstr_itt2ga0)[] = "itt2ga0=0x%x";
+static const char BCMATTACHDATA(vstr_pa)[] = "pa%dgw%da%d=0x%x";
+static const char BCMATTACHDATA(vstr_pahl)[] = "pa%dg%cw%da%d=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5ga0)[] = "maxp5ga0=0x%x";
+static const char BCMATTACHDATA(vstr_itt5ga0)[] = "itt5ga0=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gha0)[] = "maxp5gha0=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gla0)[] = "maxp5gla0=0x%x";
+static const char BCMATTACHDATA(vstr_maxp2ga1)[] = "maxp2ga1=0x%x";
+static const char BCMATTACHDATA(vstr_itt2ga1)[] = "itt2ga1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5ga1)[] = "maxp5ga1=0x%x";
+static const char BCMATTACHDATA(vstr_itt5ga1)[] = "itt5ga1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gha1)[] = "maxp5gha1=0x%x";
+static const char BCMATTACHDATA(vstr_maxp5gla1)[] = "maxp5gla1=0x%x";
+static const char BCMATTACHDATA(vstr_cck2gpo)[] = "cck2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm2gpo)[] = "ofdm2gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5gpo)[] = "ofdm5gpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5glpo)[] = "ofdm5glpo=0x%x";
+static const char BCMATTACHDATA(vstr_ofdm5ghpo)[] = "ofdm5ghpo=0x%x";
+static const char BCMATTACHDATA(vstr_cddpo)[] = "cddpo=0x%x";
+static const char BCMATTACHDATA(vstr_stbcpo)[] = "stbcpo=0x%x";
+static const char BCMATTACHDATA(vstr_bw40po)[] = "bw40po=0x%x";
+static const char BCMATTACHDATA(vstr_bwduppo)[] = "bwduppo=0x%x";
+static const char BCMATTACHDATA(vstr_mcspo)[] = "mcs%dgpo%d=0x%x";
+static const char BCMATTACHDATA(vstr_mcspohl)[] = "mcs%dg%cpo%d=0x%x";
+static const char BCMATTACHDATA(vstr_custom)[] = "customvar%d=0x%x";
+static const char BCMATTACHDATA(vstr_cckdigfilttype)[] = "cckdigfilttype=%d";
+static const char BCMATTACHDATA(vstr_boardnum)[] = "boardnum=%d";
+static const char BCMATTACHDATA(vstr_macaddr)[] = "macaddr=%s";
+static const char BCMATTACHDATA(vstr_usbepnum)[] = "usbepnum=0x%x";
+static const char BCMATTACHDATA(vstr_end)[] = "END\0";
+
+uint8 patch_pair = 0;
+
+/* For dongle HW, accept partial calibration parameters */
+#define BCMDONGLECASE(n)
+
+int
+BCMATTACHFN(srom_parsecis) (osl_t * osh, uint8 * pcis[], uint ciscnt,
+ char **vars, uint * count)
+{
+ char eabuf[32];
+ char *base;
+ varbuf_t b;
+ uint8 *cis, tup, tlen, sromrev = 1;
+ int i, j;
+ bool ag_init = FALSE;
+ uint32 w32;
+ uint funcid;
+ uint cisnum;
+ int32 boardnum;
+ int err;
+ bool standard_cis;
+
+ ASSERT(vars != NULL);
+ ASSERT(count != NULL);
+
+ boardnum = -1;
+
+ base = MALLOC(osh, MAXSZ_NVRAM_VARS);
+ ASSERT(base != NULL);
+ if (!base)
+ return -2;
+
+ varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
+ bzero(base, MAXSZ_NVRAM_VARS);
+ eabuf[0] = '\0';
+ for (cisnum = 0; cisnum < ciscnt; cisnum++) {
+ cis = *pcis++;
+ i = 0;
+ funcid = 0;
+ standard_cis = TRUE;
+ do {
+ if (standard_cis) {
+ tup = cis[i++];
+ if (tup == CISTPL_NULL || tup == CISTPL_END)
+ tlen = 0;
+ else
+ tlen = cis[i++];
+ } else {
+ if (cis[i] == CISTPL_NULL
+ || cis[i] == CISTPL_END) {
+ tlen = 0;
+ tup = cis[i];
+ } else {
+ tlen = cis[i];
+ tup = CISTPL_BRCM_HNBU;
+ }
+ ++i;
+ }
+ if ((i + tlen) >= CIS_SIZE)
+ break;
+
+ switch (tup) {
+ case CISTPL_VERS_1:
+ /* assume the strings are good if the version field checks out */
+ if (((cis[i + 1] << 8) + cis[i]) >= 0x0008) {
+					varbuf_append(&b, vstr_manf,
+						      &cis[i + 2]);
+					varbuf_append(&b, vstr_productname,
+						      &cis[i + 3 +
+						      strlen((char *)&cis[i + 2])]);
+					break;
+				}
+				/* otherwise falls through to CISTPL_MANFID */
+
+ case CISTPL_MANFID:
+ varbuf_append(&b, vstr_manfid,
+ (cis[i + 1] << 8) + cis[i]);
+ varbuf_append(&b, vstr_prodid,
+ (cis[i + 3] << 8) + cis[i + 2]);
+ break;
+
+ case CISTPL_FUNCID:
+ funcid = cis[i];
+ break;
+
+ case CISTPL_FUNCE:
+ switch (funcid) {
+ case CISTPL_FID_SDIO:
+#ifdef BCMSDIO
+ if (cis[i] == 0) {
+ uint8 spd = cis[i + 3];
+ static int base[] = {
+ -1, 10, 12, 13, 15, 20,
+ 25, 30,
+ 35, 40, 45, 50, 55, 60,
+ 70, 80
+ };
+ static int mult[] = {
+ 10, 100, 1000, 10000,
+ -1, -1, -1, -1
+ };
+					ASSERT((mult[spd & 0x7] != -1) &&
+					       (base[(spd >> 3) & 0x0f]));
+					varbuf_append(&b, vstr_sdmaxblk[0],
+						      (cis[i + 2] << 8) +
+						      cis[i + 1]);
+					varbuf_append(&b, vstr_sdmaxspeed,
+						      (mult[spd & 0x7] *
+						       base[(spd >> 3) & 0x0f]));
+				} else if (cis[i] == 1) {
+					varbuf_append(&b,
+						      vstr_sdmaxblk[cisnum],
+						      (cis[i + 13] << 8) |
+						      cis[i + 12]);
+				}
+#endif /* BCMSDIO */
+ funcid = 0;
+ break;
+ default:
+ /* set macaddr if HNBU_MACADDR not seen yet */
+ if (eabuf[0] == '\0'
+ && cis[i] == LAN_NID
+ && !(ETHER_ISNULLADDR(&cis[i + 2]))
+ && !(ETHER_ISMULTI(&cis[i + 2]))) {
+ ASSERT(cis[i + 1] ==
+ ETHER_ADDR_LEN);
+					bcm_ether_ntoa((struct ether_addr *)
+						       &cis[i + 2], eabuf);
+
+					/* set boardnum if HNBU_BOARDNUM not seen yet */
+					if (boardnum == -1)
+						boardnum = (cis[i + 6] << 8) +
+							   cis[i + 7];
+ }
+ break;
+ }
+ break;
+
+ case CISTPL_CFTABLE:
+ varbuf_append(&b, vstr_regwindowsz,
+ (cis[i + 7] << 8) | cis[i + 6]);
+ break;
+
+ case CISTPL_BRCM_HNBU:
+ switch (cis[i]) {
+ case HNBU_SROMREV:
+ sromrev = cis[i + 1];
+ varbuf_append(&b, vstr_sromrev,
+ sromrev);
+ break;
+
+ case HNBU_XTALFREQ:
+ varbuf_append(&b, vstr_xtalfreq,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_CHIPID:
+ varbuf_append(&b, vstr_vendid,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ varbuf_append(&b, vstr_devid,
+ (cis[i + 4] << 8) +
+ cis[i + 3]);
+ if (tlen >= 7) {
+ varbuf_append(&b, vstr_chiprev,
+ (cis[i + 6] << 8)
+ + cis[i + 5]);
+ }
+ if (tlen >= 9) {
+ varbuf_append(&b,
+ vstr_subvendid,
+ (cis[i + 8] << 8)
+ + cis[i + 7]);
+ }
+ if (tlen >= 11) {
+ varbuf_append(&b, vstr_subdevid,
+ (cis[i + 10] << 8)
+ + cis[i + 9]);
+ /* subdevid doubles for boardtype */
+ varbuf_append(&b,
+ vstr_boardtype,
+ (cis[i + 10] << 8)
+ + cis[i + 9]);
+ }
+ break;
+
+ case HNBU_BOARDNUM:
+ boardnum =
+ (cis[i + 2] << 8) + cis[i + 1];
+ break;
+
+ case HNBU_PATCH:
+ {
+ char vstr_paddr[16];
+ char vstr_pdata[16];
+
+ /* retrieve the patch pairs
+ * from tlen/6; where 6 is
+ * sizeof(patch addr(2)) +
+ * sizeof(patch data(4)).
+ */
+ patch_pair = tlen / 6;
+
+					for (j = 0; j < patch_pair; j++) {
+						snprintf(vstr_paddr,
+							 sizeof(vstr_paddr),
+							 "pa%d=0x%%x", j);
+						snprintf(vstr_pdata,
+							 sizeof(vstr_pdata),
+							 "pd%d=0x%%x", j);
+
+						varbuf_append(&b, vstr_paddr,
+							      (cis[i + (j * 6) + 2] << 8) |
+							      cis[i + (j * 6) + 1]);
+
+						varbuf_append(&b, vstr_pdata,
+							      (cis[i + (j * 6) + 6] << 24) |
+							      (cis[i + (j * 6) + 5] << 16) |
+							      (cis[i + (j * 6) + 4] << 8) |
+							      cis[i + (j * 6) + 3]);
+					}
+ }
+ break;
+
+ case HNBU_BOARDREV:
+ if (tlen == 2)
+ varbuf_append(&b, vstr_boardrev,
+ cis[i + 1]);
+ else
+ varbuf_append(&b, vstr_boardrev,
+ (cis[i + 2] << 8)
+ + cis[i + 1]);
+ break;
+
+ case HNBU_BOARDFLAGS:
+				w32 = (cis[i + 2] << 8) + cis[i + 1];
+				if (tlen >= 5)
+					w32 |= ((cis[i + 4] << 24) +
+						(cis[i + 3] << 16));
+				varbuf_append(&b, vstr_boardflags, w32);
+
+				if (tlen >= 7) {
+					w32 = (cis[i + 6] << 8) + cis[i + 5];
+					if (tlen >= 9)
+						w32 |= ((cis[i + 8] << 24) +
+							(cis[i + 7] << 16));
+					varbuf_append(&b, vstr_boardflags2,
+						      w32);
+				}
+				break;
+
+ case HNBU_USBFS:
+ varbuf_append(&b, vstr_usbfs,
+ cis[i + 1]);
+ break;
+
+ case HNBU_BOARDTYPE:
+ varbuf_append(&b, vstr_boardtype,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ break;
+
+ case HNBU_HNBUCIS:
+ /*
+ * what follows is a nonstandard HNBU CIS
+ * that lacks CISTPL_BRCM_HNBU tags
+ *
+ * skip 0xff (end of standard CIS)
+ * after this tuple
+ */
+ tlen++;
+ standard_cis = FALSE;
+ break;
+
+ case HNBU_USBEPNUM:
+				varbuf_append(&b, vstr_usbepnum,
+					      (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_AA:
+ varbuf_append(&b, vstr_aa2g,
+ cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_aa5g,
+ cis[i + 2]);
+ break;
+
+ case HNBU_AG:
+ varbuf_append(&b, vstr_ag, 0,
+ cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_ag, 1,
+ cis[i + 2]);
+ if (tlen >= 4)
+ varbuf_append(&b, vstr_ag, 2,
+ cis[i + 3]);
+ if (tlen >= 5)
+ varbuf_append(&b, vstr_ag, 3,
+ cis[i + 4]);
+ ag_init = TRUE;
+ break;
+
+ case HNBU_ANT5G:
+ varbuf_append(&b, vstr_aa5g,
+ cis[i + 1]);
+ varbuf_append(&b, vstr_ag, 1,
+ cis[i + 2]);
+ break;
+
+ case HNBU_CC:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b, vstr_cc, cis[i + 1]);
+ break;
+
+ case HNBU_PAPARMS:
+ switch (tlen) {
+ case 2:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b,
+ vstr_pa0maxpwr,
+ cis[i + 1]);
+ break;
+ case 10:
+ ASSERT(sromrev >= 2);
+ varbuf_append(&b, vstr_opo,
+ cis[i + 9]);
+ /* FALLTHROUGH */
+ case 9:
+ varbuf_append(&b,
+ vstr_pa0maxpwr,
+ cis[i + 8]);
+ /* FALLTHROUGH */
+ BCMDONGLECASE(8)
+ varbuf_append(&b,
+ vstr_pa0itssit,
+ cis[i + 7]);
+ /* FALLTHROUGH */
+ BCMDONGLECASE(7)
+					for (j = 0; j < 3; j++) {
+						varbuf_append(&b, vstr_pa0b[j],
+							      (cis[i + (j * 2) + 2] << 8) +
+							      cis[i + (j * 2) + 1]);
+					}
+					break;
+ default:
+ ASSERT((tlen == 2)
+ || (tlen == 9)
+ || (tlen == 10));
+ break;
+ }
+ break;
+
+ case HNBU_PAPARMS5G:
+ ASSERT((sromrev == 2)
+ || (sromrev == 3));
+ switch (tlen) {
+ case 23:
+ varbuf_append(&b,
+ vstr_pa1himaxpwr,
+ cis[i + 22]);
+ varbuf_append(&b,
+ vstr_pa1lomaxpwr,
+ cis[i + 21]);
+ varbuf_append(&b,
+ vstr_pa1maxpwr,
+ cis[i + 20]);
+ /* FALLTHROUGH */
+ case 20:
+ varbuf_append(&b,
+ vstr_pa1itssit,
+ cis[i + 19]);
+ /* FALLTHROUGH */
+ case 19:
+					for (j = 0; j < 3; j++) {
+						varbuf_append(&b, vstr_pa1b[j],
+							      (cis[i + (j * 2) + 2] << 8) +
+							      cis[i + (j * 2) + 1]);
+					}
+					for (j = 3; j < 6; j++) {
+						varbuf_append(&b,
+							      vstr_pa1lob[j - 3],
+							      (cis[i + (j * 2) + 2] << 8) +
+							      cis[i + (j * 2) + 1]);
+					}
+					for (j = 6; j < 9; j++) {
+						varbuf_append(&b,
+							      vstr_pa1hib[j - 6],
+							      (cis[i + (j * 2) + 2] << 8) +
+							      cis[i + (j * 2) + 1]);
+					}
+					break;
+ default:
+ ASSERT((tlen == 19) ||
+ (tlen == 20)
+ || (tlen == 23));
+ break;
+ }
+ break;
+
+ case HNBU_OEM:
+ ASSERT(sromrev == 1);
+ varbuf_append(&b, vstr_oem,
+ cis[i + 1], cis[i + 2],
+ cis[i + 3], cis[i + 4],
+ cis[i + 5], cis[i + 6],
+ cis[i + 7], cis[i + 8]);
+ break;
+
+ case HNBU_LEDS:
+				for (j = 1; j <= 4; j++) {
+					if (cis[i + j] != 0xff)
+						varbuf_append(&b, vstr_ledbh,
+							      j - 1,
+							      cis[i + j]);
+				}
+ break;
+
+ case HNBU_CCODE:
+ ASSERT(sromrev > 1);
+ if ((cis[i + 1] == 0)
+ || (cis[i + 2] == 0))
+ varbuf_append(&b, vstr_noccode);
+ else
+ varbuf_append(&b, vstr_ccode,
+ cis[i + 1],
+ cis[i + 2]);
+ varbuf_append(&b, vstr_cctl,
+ cis[i + 3]);
+ break;
+
+ case HNBU_CCKPO:
+ ASSERT(sromrev > 2);
+				varbuf_append(&b, vstr_cckpo,
+					      (cis[i + 2] << 8) | cis[i + 1]);
+ break;
+
+ case HNBU_OFDMPO:
+ ASSERT(sromrev > 2);
+ varbuf_append(&b, vstr_ofdmpo,
+ (cis[i + 4] << 24) |
+ (cis[i + 3] << 16) |
+ (cis[i + 2] << 8) |
+ cis[i + 1]);
+ break;
+
+ case HNBU_WPS:
+ varbuf_append(&b, vstr_wpsgpio,
+ cis[i + 1]);
+ if (tlen >= 3)
+ varbuf_append(&b, vstr_wpsled,
+ cis[i + 2]);
+ break;
+
+ case HNBU_RSSISMBXA2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rssismf2g,
+ cis[i + 1] & 0xf);
+ varbuf_append(&b, vstr_rssismc2g,
+ (cis[i + 1] >> 4) & 0xf);
+ varbuf_append(&b, vstr_rssisav2g,
+ cis[i + 2] & 0x7);
+ varbuf_append(&b, vstr_bxa2g,
+ (cis[i + 2] >> 3) & 0x3);
+ break;
+
+ case HNBU_RSSISMBXA5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rssismf5g,
+ cis[i + 1] & 0xf);
+ varbuf_append(&b, vstr_rssismc5g,
+ (cis[i + 1] >> 4) & 0xf);
+ varbuf_append(&b, vstr_rssisav5g,
+ cis[i + 2] & 0x7);
+ varbuf_append(&b, vstr_bxa5g,
+ (cis[i + 2] >> 3) & 0x3);
+ break;
+
+ case HNBU_TRI2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_tri2g,
+ cis[i + 1]);
+ break;
+
+ case HNBU_TRI5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_tri5gl,
+ cis[i + 1]);
+ varbuf_append(&b, vstr_tri5g,
+ cis[i + 2]);
+ varbuf_append(&b, vstr_tri5gh,
+ cis[i + 3]);
+ break;
+
+ case HNBU_RXPO2G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rxpo2g,
+ cis[i + 1]);
+ break;
+
+ case HNBU_RXPO5G:
+ ASSERT(sromrev == 3);
+ varbuf_append(&b, vstr_rxpo5g,
+ cis[i + 1]);
+ break;
+
+ case HNBU_MACADDR:
+ if (!(ETHER_ISNULLADDR(&cis[i + 1])) &&
+ !(ETHER_ISMULTI(&cis[i + 1]))) {
+					bcm_ether_ntoa((struct ether_addr *)
+						       &cis[i + 1], eabuf);
+
+					/* set boardnum if HNBU_BOARDNUM not seen yet */
+					if (boardnum == -1)
+						boardnum = (cis[i + 5] << 8) +
+							   cis[i + 6];
+ }
+ break;
+
+ case HNBU_LEDDC:
+ /* CIS leddc only has 16bits, convert it to 32bits */
+ w32 = ((cis[i + 2] << 24) | /* oncount */
+ (cis[i + 1] << 8)); /* offcount */
+ varbuf_append(&b, vstr_leddc, w32);
+ break;
+
+ case HNBU_CHAINSWITCH:
+ varbuf_append(&b, vstr_txchain,
+ cis[i + 1]);
+ varbuf_append(&b, vstr_rxchain,
+ cis[i + 2]);
+ varbuf_append(&b, vstr_antswitch,
+ (cis[i + 4] << 8) +
+ cis[i + 3]);
+ break;
+
+ case HNBU_REGREV:
+ varbuf_append(&b, vstr_regrev,
+ cis[i + 1]);
+ break;
+
+			case HNBU_FEM:{
+				uint16 fem = (cis[i + 2] << 8) + cis[i + 1];
+
+				varbuf_append(&b, vstr_antswctl2g,
+					      (fem & SROM8_FEM_ANTSWLUT_MASK) >>
+					      SROM8_FEM_ANTSWLUT_SHIFT);
+				varbuf_append(&b, vstr_triso2g,
+					      (fem & SROM8_FEM_TR_ISO_MASK) >>
+					      SROM8_FEM_TR_ISO_SHIFT);
+				varbuf_append(&b, vstr_pdetrange2g,
+					      (fem & SROM8_FEM_PDET_RANGE_MASK) >>
+					      SROM8_FEM_PDET_RANGE_SHIFT);
+				varbuf_append(&b, vstr_extpagain2g,
+					      (fem & SROM8_FEM_EXTPA_GAIN_MASK) >>
+					      SROM8_FEM_EXTPA_GAIN_SHIFT);
+				varbuf_append(&b, vstr_tssipos2g,
+					      (fem & SROM8_FEM_TSSIPOS_MASK) >>
+					      SROM8_FEM_TSSIPOS_SHIFT);
+				if (tlen < 5)
+					break;
+
+				fem = (cis[i + 4] << 8) + cis[i + 3];
+				varbuf_append(&b, vstr_antswctl5g,
+					      (fem & SROM8_FEM_ANTSWLUT_MASK) >>
+					      SROM8_FEM_ANTSWLUT_SHIFT);
+				varbuf_append(&b, vstr_triso5g,
+					      (fem & SROM8_FEM_TR_ISO_MASK) >>
+					      SROM8_FEM_TR_ISO_SHIFT);
+				varbuf_append(&b, vstr_pdetrange5g,
+					      (fem & SROM8_FEM_PDET_RANGE_MASK) >>
+					      SROM8_FEM_PDET_RANGE_SHIFT);
+				varbuf_append(&b, vstr_extpagain5g,
+					      (fem & SROM8_FEM_EXTPA_GAIN_MASK) >>
+					      SROM8_FEM_EXTPA_GAIN_SHIFT);
+				varbuf_append(&b, vstr_tssipos5g,
+					      (fem & SROM8_FEM_TSSIPOS_MASK) >>
+					      SROM8_FEM_TSSIPOS_SHIFT);
+				break;
+				}
+
+ case HNBU_PAPARMS_C0:
+ varbuf_append(&b, vstr_maxp2ga0,
+ cis[i + 1]);
+ varbuf_append(&b, vstr_itt2ga0,
+ cis[i + 2]);
+				varbuf_append(&b, vstr_pa, 2, 0, 0,
+					      (cis[i + 4] << 8) + cis[i + 3]);
+				varbuf_append(&b, vstr_pa, 2, 1, 0,
+					      (cis[i + 6] << 8) + cis[i + 5]);
+				varbuf_append(&b, vstr_pa, 2, 2, 0,
+					      (cis[i + 8] << 8) + cis[i + 7]);
+				if (tlen < 31)
+					break;
+
+				varbuf_append(&b, vstr_maxp5ga0, cis[i + 9]);
+				varbuf_append(&b, vstr_itt5ga0, cis[i + 10]);
+				varbuf_append(&b, vstr_maxp5gha0, cis[i + 11]);
+				varbuf_append(&b, vstr_maxp5gla0, cis[i + 12]);
+				varbuf_append(&b, vstr_pa, 5, 0, 0,
+					      (cis[i + 14] << 8) + cis[i + 13]);
+				varbuf_append(&b, vstr_pa, 5, 1, 0,
+					      (cis[i + 16] << 8) + cis[i + 15]);
+				varbuf_append(&b, vstr_pa, 5, 2, 0,
+					      (cis[i + 18] << 8) + cis[i + 17]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 0, 0,
+					      (cis[i + 20] << 8) + cis[i + 19]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 1, 0,
+					      (cis[i + 22] << 8) + cis[i + 21]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 2, 0,
+					      (cis[i + 24] << 8) + cis[i + 23]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 0, 0,
+					      (cis[i + 26] << 8) + cis[i + 25]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 1, 0,
+					      (cis[i + 28] << 8) + cis[i + 27]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 2, 0,
+					      (cis[i + 30] << 8) + cis[i + 29]);
+				break;
+
+ case HNBU_PAPARMS_C1:
+ varbuf_append(&b, vstr_maxp2ga1,
+ cis[i + 1]);
+ varbuf_append(&b, vstr_itt2ga1,
+ cis[i + 2]);
+				varbuf_append(&b, vstr_pa, 2, 0, 1,
+					      (cis[i + 4] << 8) + cis[i + 3]);
+				varbuf_append(&b, vstr_pa, 2, 1, 1,
+					      (cis[i + 6] << 8) + cis[i + 5]);
+				varbuf_append(&b, vstr_pa, 2, 2, 1,
+					      (cis[i + 8] << 8) + cis[i + 7]);
+				if (tlen < 31)
+					break;
+
+				varbuf_append(&b, vstr_maxp5ga1, cis[i + 9]);
+				varbuf_append(&b, vstr_itt5ga1, cis[i + 10]);
+				varbuf_append(&b, vstr_maxp5gha1, cis[i + 11]);
+				varbuf_append(&b, vstr_maxp5gla1, cis[i + 12]);
+				varbuf_append(&b, vstr_pa, 5, 0, 1,
+					      (cis[i + 14] << 8) + cis[i + 13]);
+				varbuf_append(&b, vstr_pa, 5, 1, 1,
+					      (cis[i + 16] << 8) + cis[i + 15]);
+				varbuf_append(&b, vstr_pa, 5, 2, 1,
+					      (cis[i + 18] << 8) + cis[i + 17]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 0, 1,
+					      (cis[i + 20] << 8) + cis[i + 19]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 1, 1,
+					      (cis[i + 22] << 8) + cis[i + 21]);
+				varbuf_append(&b, vstr_pahl, 5, 'l', 2, 1,
+					      (cis[i + 24] << 8) + cis[i + 23]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 0, 1,
+					      (cis[i + 26] << 8) + cis[i + 25]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 1, 1,
+					      (cis[i + 28] << 8) + cis[i + 27]);
+				varbuf_append(&b, vstr_pahl, 5, 'h', 2, 1,
+					      (cis[i + 30] << 8) + cis[i + 29]);
+				break;
+
+ case HNBU_PO_CCKOFDM:
+ varbuf_append(&b, vstr_cck2gpo,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ varbuf_append(&b, vstr_ofdm2gpo,
+ (cis[i + 6] << 24) +
+ (cis[i + 5] << 16) +
+ (cis[i + 4] << 8) +
+ cis[i + 3]);
+ if (tlen < 19)
+ break;
+
+ varbuf_append(&b, vstr_ofdm5gpo,
+ (cis[i + 10] << 24) +
+ (cis[i + 9] << 16) +
+ (cis[i + 8] << 8) +
+ cis[i + 7]);
+ varbuf_append(&b, vstr_ofdm5glpo,
+ (cis[i + 14] << 24) +
+ (cis[i + 13] << 16) +
+ (cis[i + 12] << 8) +
+ cis[i + 11]);
+ varbuf_append(&b, vstr_ofdm5ghpo,
+ (cis[i + 18] << 24) +
+ (cis[i + 17] << 16) +
+ (cis[i + 16] << 8) +
+ cis[i + 15]);
+ break;
+
+ case HNBU_PO_MCS2G:
+				for (j = 0; j <= (tlen / 2); j++) {
+					varbuf_append(&b, vstr_mcspo, 2, j,
+						      (cis[i + 2 + 2 * j] << 8) +
+						      cis[i + 1 + 2 * j]);
+				}
+				break;
+
+ case HNBU_PO_MCS5GM:
+				for (j = 0; j <= (tlen / 2); j++) {
+					varbuf_append(&b, vstr_mcspo, 5, j,
+						      (cis[i + 2 + 2 * j] << 8) +
+						      cis[i + 1 + 2 * j]);
+				}
+				break;
+
+ case HNBU_PO_MCS5GLH:
+				for (j = 0; j <= (tlen / 4); j++) {
+					varbuf_append(&b, vstr_mcspohl,
+						      5, 'l', j,
+						      (cis[i + 2 + 2 * j] << 8) +
+						      cis[i + 1 + 2 * j]);
+				}
+
+				for (j = 0; j <= (tlen / 4); j++) {
+					varbuf_append(&b, vstr_mcspohl,
+						      5, 'h', j,
+						      (cis[i + ((tlen / 2) + 2) + 2 * j] << 8) +
+						      cis[i + ((tlen / 2) + 1) + 2 * j]);
+				}
+
+				break;
+
+ case HNBU_PO_CDD:
+ varbuf_append(&b, vstr_cddpo,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ break;
+
+ case HNBU_PO_STBC:
+ varbuf_append(&b, vstr_stbcpo,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ break;
+
+ case HNBU_PO_40M:
+ varbuf_append(&b, vstr_bw40po,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ break;
+
+ case HNBU_PO_40MDUP:
+ varbuf_append(&b, vstr_bwduppo,
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ break;
+
+ case HNBU_OFDMPO5G:
+ varbuf_append(&b, vstr_ofdm5gpo,
+ (cis[i + 4] << 24) +
+ (cis[i + 3] << 16) +
+ (cis[i + 2] << 8) +
+ cis[i + 1]);
+ varbuf_append(&b, vstr_ofdm5glpo,
+ (cis[i + 8] << 24) +
+ (cis[i + 7] << 16) +
+ (cis[i + 6] << 8) +
+ cis[i + 5]);
+ varbuf_append(&b, vstr_ofdm5ghpo,
+ (cis[i + 12] << 24) +
+ (cis[i + 11] << 16) +
+ (cis[i + 10] << 8) +
+ cis[i + 9]);
+ break;
+
+ case HNBU_CUSTOM1:
+				varbuf_append(&b, vstr_custom, 1,
+					      ((cis[i + 4] << 24) +
+					       (cis[i + 3] << 16) +
+					       (cis[i + 2] << 8) +
+					       cis[i + 1]));
+				break;
+
+#if defined(BCMSDIO)
+ case HNBU_SROM3SWRGN:
+ if (tlen >= 73) {
+ uint16 srom[35];
+ uint8 srev = cis[i + 1 + 70];
+ ASSERT(srev == 3);
+ /* make tuple value 16-bit aligned and parse it */
+ bcopy(&cis[i + 1], srom,
+ sizeof(srom));
+ _initvars_srom_pci(srev, srom,
+ SROM3_SWRGN_OFF,
+ &b);
+ /* 2.4G antenna gain is included in SROM */
+ ag_init = TRUE;
+ /* Ethernet MAC address is included in SROM */
+ eabuf[0] = 0;
+ boardnum = -1;
+ }
+				/* create extra variables */
+				if (tlen >= 75)
+					varbuf_append(&b, vstr_vendid,
+						      (cis[i + 1 + 73] << 8) +
+						      cis[i + 1 + 72]);
+				if (tlen >= 77)
+					varbuf_append(&b, vstr_devid,
+						      (cis[i + 1 + 75] << 8) +
+						      cis[i + 1 + 74]);
+				if (tlen >= 79)
+					varbuf_append(&b, vstr_xtalfreq,
+						      (cis[i + 1 + 77] << 8) +
+						      cis[i + 1 + 76]);
+				break;
+#endif /* defined(BCMSDIO) */
+
+ case HNBU_CCKFILTTYPE:
+ varbuf_append(&b, vstr_cckdigfilttype,
+ (cis[i + 1]));
+ break;
+ }
+
+ break;
+ }
+ i += tlen;
+ } while (tup != CISTPL_END);
+ }
+
+ if (boardnum != -1) {
+ varbuf_append(&b, vstr_boardnum, boardnum);
+ }
+
+ if (eabuf[0]) {
+ varbuf_append(&b, vstr_macaddr, eabuf);
+ }
+
+ /* if there is no antenna gain field, set default */
+ if (getvar(NULL, "ag0") == NULL && ag_init == FALSE) {
+ varbuf_append(&b, vstr_ag, 0, 0xff);
+ }
+
+ /* final nullbyte terminator */
+ ASSERT(b.size >= 1);
+ *b.buf++ = '\0';
+
+ ASSERT(b.buf - base <= MAXSZ_NVRAM_VARS);
+ err = initvars_table(osh, base, b.buf, vars, count);
+
+ MFREE(osh, base, MAXSZ_NVRAM_VARS);
+ return err;
+}
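+
+/*
+ * Worked example (illustrative bytes, not taken from any real board):
+ * a Broadcom vendor tuple in the CIS stream walked above is laid out as
+ *
+ *   [CISTPL_BRCM_HNBU] [tlen] [HNBU subtype] [data ...]
+ *
+ * so a two-byte HNBU_BOARDREV tuple such as
+ *
+ *   CISTPL_BRCM_HNBU, 0x02, HNBU_BOARDREV, 0x23
+ *
+ * is turned by srom_parsecis() into the nvram pair "boardrev=0x23".
+ */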
+
+/* In chips with chipcommon rev 32 and later, the srom is in chipcommon,
+ * not in the bus cores.
+ */
+static uint16
+srom_cc_cmd(si_t * sih, osl_t * osh, void *ccregs, uint32 cmd, uint wordoff,
+ uint16 data)
+{
+ chipcregs_t *cc = (chipcregs_t *) ccregs;
+ uint wait_cnt = 1000;
+
+ if ((cmd == SRC_OP_READ) || (cmd == SRC_OP_WRITE)) {
+ W_REG(osh, &cc->sromaddress, wordoff * 2);
+ if (cmd == SRC_OP_WRITE)
+ W_REG(osh, &cc->sromdata, data);
+ }
+
+ W_REG(osh, &cc->sromcontrol, SRC_START | cmd);
+
+	while (wait_cnt) {
+		if ((R_REG(osh, &cc->sromcontrol) & SRC_BUSY) == 0)
+			break;
+		wait_cnt--;
+	}
+
+	if (!wait_cnt) {
+ BS_ERROR(("%s: Command 0x%x timed out\n", __func__, cmd));
+ return 0xffff;
+ }
+ if (cmd == SRC_OP_READ)
+ return (uint16) R_REG(osh, &cc->sromdata);
+ else
+ return 0xffff;
+}
+
+/*
+ * Read in and validate sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+sprom_read_pci(osl_t * osh, si_t * sih, uint16 * sprom, uint wordoff,
+ uint16 * buf, uint nwords, bool check_crc)
+{
+ int err = 0;
+ uint i;
+ void *ccregs = NULL;
+
+ /* read the sprom */
+ for (i = 0; i < nwords; i++) {
+
+ if (sih->ccrev > 31 && ISSIM_ENAB(sih)) {
+ /* use indirect since direct is too slow on QT */
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return 1;
+
+ ccregs = (void *)((uint8 *) sprom - CC_SROM_OTP);
+ buf[i] =
+ srom_cc_cmd(sih, osh, ccregs, SRC_OP_READ,
+ wordoff + i, 0);
+
+		} else {
+			buf[i] = R_REG(osh, &sprom[wordoff + i]);
+		}
+
+ }
+
+ /* bypass crc checking for simulation to allow srom hack */
+ if (ISSIM_ENAB(sih))
+ return err;
+
+ if (check_crc) {
+
+ if (buf[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ BS_ERROR(("%s: buf[0] = 0x%x, returning bad-crc\n",
+ __func__, buf[0]));
+ return 1;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, nwords * 2);
+ if (hndcrc8((uint8 *) buf, nwords * 2, CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE) {
+			/* DBG only: PCI always reads srom4 first, then srom8/9 */
+ /* BS_ERROR(("%s: bad crc\n", __func__)); */
+ err = 1;
+ }
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, nwords * 2);
+ }
+ return err;
+}
+
+#if defined(BCMNVRAMR)
+static int otp_read_pci(osl_t * osh, si_t * sih, uint16 * buf, uint bufsz)
+{
+ uint8 *otp;
+ uint sz = OTP_SZ_MAX / 2; /* size in words */
+ int err = 0;
+
+ ASSERT(bufsz <= OTP_SZ_MAX);
+
+ if ((otp = MALLOC(osh, OTP_SZ_MAX)) == NULL) {
+ return BCME_ERROR;
+ }
+
+ bzero(otp, OTP_SZ_MAX);
+
+ err = otp_read_region(sih, OTP_HW_RGN, (uint16 *) otp, &sz);
+
+ bcopy(otp, buf, bufsz);
+
+ if (otp)
+ MFREE(osh, otp, OTP_SZ_MAX);
+
+ /* Check CRC */
+ if (buf[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ BS_ERROR(("%s: buf[0] = 0x%x, returning bad-crc\n", __func__,
+ buf[0]));
+ return 1;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, bufsz);
+ if (hndcrc8((uint8 *) buf, SROM4_WORDS * 2, CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE) {
+ BS_ERROR(("%s: bad crc\n", __func__));
+ err = 1;
+ }
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, bufsz);
+
+ return err;
+}
+#endif /* defined(BCMNVRAMR) */
+/*
+ * Create variable table from memory.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_table) (osl_t * osh, char *start, char *end, char **vars,
+ uint * count) {
+ int c = (int)(end - start);
+
+ /* do it only when there is more than just the null string */
+ if (c > 1) {
+ char *vp = MALLOC(osh, c);
+ ASSERT(vp != NULL);
+ if (!vp)
+ return BCME_NOMEM;
+ bcopy(start, vp, c);
+ *vars = vp;
+ *count = c;
+ } else {
+ *vars = NULL;
+ *count = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Find variables with <devpath> from flash. 'base' points to the beginning
+ * of the table upon enter and to the end of the table upon exit when success.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_flash) (si_t * sih, osl_t * osh, char **base, uint len) {
+ char *vp = *base;
+ char *flash;
+ int err;
+ char *s;
+ uint l, dl, copy_len;
+ char devpath[SI_DEVPATH_BUFSZ];
+
+ /* allocate memory and read in flash */
+ if (!(flash = MALLOC(osh, NVRAM_SPACE)))
+ return BCME_NOMEM;
+ if ((err = nvram_getall(flash, NVRAM_SPACE)))
+ goto exit;
+
+ si_devpath(sih, devpath, sizeof(devpath));
+
+ /* grab vars with the <devpath> prefix in name */
+ dl = strlen(devpath);
+ for (s = flash; s && *s; s += l + 1) {
+ l = strlen(s);
+
+ /* skip non-matching variable */
+ if (strncmp(s, devpath, dl))
+ continue;
+
+ /* is there enough room to copy? */
+ copy_len = l - dl + 1;
+ if (len < copy_len) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+
+ /* no prefix, just the name=value */
+ strncpy(vp, &s[dl], copy_len);
+ vp += copy_len;
+ len -= copy_len;
+ }
+
+ /* add null string as terminator */
+ if (len < 1) {
+ err = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ *vp++ = '\0';
+
+ *base = vp;
+
+ exit: MFREE(osh, flash, NVRAM_SPACE);
+ return err;
+}
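+
+/*
+ * Illustration (hypothetical flash contents): if si_devpath() yields a
+ * prefix such as "pci/1/1/" and flash holds
+ *
+ *   "pci/1/1/sromrev=3\0pci/1/1/boardtype=0x4b9\0lan_ipaddr=192.168.1.1\0\0"
+ *
+ * then initvars_flash() copies only the matching entries with the
+ * prefix stripped, i.e. "sromrev=3\0boardtype=0x4b9\0\0".
+ */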
+
+/*
+ * Initialize nonvolatile variable table from flash.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_flash_si) (si_t * sih, char **vars, uint * count) {
+ osl_t *osh = si_osh(sih);
+ char *vp, *base;
+ int err;
+
+ ASSERT(vars != NULL);
+ ASSERT(count != NULL);
+
+ base = vp = MALLOC(osh, MAXSZ_NVRAM_VARS);
+ ASSERT(vp != NULL);
+ if (!vp)
+ return BCME_NOMEM;
+
+ if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS)) == 0)
+ err = initvars_table(osh, base, vp, vars, count);
+
+ MFREE(osh, base, MAXSZ_NVRAM_VARS);
+
+ return err;
+}
+
+/* Parse SROM and create name=value pairs. 'srom' points to
+ * the SROM word array. 'off' specifies the offset of the
+ * first word 'srom' points to, which should be either 0 or
+ * SROM3_SWRGN_OFF (full SROM or software region).
+ */
+
+static uint mask_shift(uint16 mask)
+{
+ uint i;
+ for (i = 0; i < (sizeof(mask) << 3); i++) {
+ if (mask & (1 << i))
+ return i;
+ }
+ ASSERT(mask);
+ return 0;
+}
+
+static uint mask_width(uint16 mask)
+{
+ int i;
+ for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
+ if (mask & (1 << i))
+ return (uint) (i - mask_shift(mask) + 1);
+ }
+ ASSERT(mask);
+ return 0;
+}
+
+#if defined(BCMDBG)
+static bool mask_valid(uint16 mask)
+{
+ uint shift = mask_shift(mask);
+ uint width = mask_width(mask);
+ return mask == ((~0 << shift) & ~(~0 << (shift + width)));
+}
+#endif /* BCMDBG */
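+
+/*
+ * Worked example: for a field mask of 0x00f0, mask_shift() returns 4
+ * (position of the lowest set bit), mask_width() returns 4 (bits 4..7),
+ * and mask_valid() accepts the mask because the set bits are contiguous;
+ * a value is then extracted as (w & 0x00f0) >> 4, exactly as done in
+ * _initvars_srom_pci() below.
+ */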
+
+static void
+BCMATTACHFN(_initvars_srom_pci) (uint8 sromrev, uint16 * srom, uint off,
+ varbuf_t * b) {
+ uint16 w;
+ uint32 val;
+ const sromvar_t *srv;
+ uint width;
+ uint flags;
+ uint32 sr = (1 << sromrev);
+
+ varbuf_append(b, "sromrev=%d", sromrev);
+
+ for (srv = pci_sromvars; srv->name != NULL; srv++) {
+ const char *name;
+
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (srv->off < off)
+ continue;
+
+ flags = srv->flags;
+ name = srv->name;
+
+		/* This entry is for mfgc only. Don't generate a param for it. */
+ if (flags & SRFL_NOVAR)
+ continue;
+
+ if (flags & SRFL_ETHADDR) {
+ char eabuf[ETHER_ADDR_STR_LEN];
+ struct ether_addr ea;
+
+ ea.octet[0] = (srom[srv->off - off] >> 8) & 0xff;
+ ea.octet[1] = srom[srv->off - off] & 0xff;
+ ea.octet[2] = (srom[srv->off + 1 - off] >> 8) & 0xff;
+ ea.octet[3] = srom[srv->off + 1 - off] & 0xff;
+ ea.octet[4] = (srom[srv->off + 2 - off] >> 8) & 0xff;
+ ea.octet[5] = srom[srv->off + 2 - off] & 0xff;
+ bcm_ether_ntoa(&ea, eabuf);
+
+ varbuf_append(b, "%s=%s", name, eabuf);
+ } else {
+ ASSERT(mask_valid(srv->mask));
+ ASSERT(mask_width(srv->mask));
+
+ w = srom[srv->off - off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ while (srv->flags & SRFL_MORE) {
+ srv++;
+ ASSERT(srv->name != NULL);
+
+ if (srv->off == 0 || srv->off < off)
+ continue;
+
+ ASSERT(mask_valid(srv->mask));
+ ASSERT(mask_width(srv->mask));
+
+ w = srom[srv->off - off];
+				val += ((w & srv->mask) >>
+					mask_shift(srv->mask)) << width;
+ width += mask_width(srv->mask);
+ }
+
+ if ((flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (flags & SRFL_CCODE) {
+ if (val == 0)
+ varbuf_append(b, "ccode=");
+ else
+ varbuf_append(b, "ccode=%c%c",
+ (val >> 8), (val & 0xff));
+ }
+			/* LED powersave duty cycle has to be scaled into
+			 * (oncount << 24) | (offcount << 8)
+			 */
+ else if (flags & SRFL_LEDDC) {
+ uint32 w32 = (((val >> 8) & 0xff) << 24) | /* oncount */
+ (((val & 0xff)) << 8); /* offcount */
+ varbuf_append(b, "leddc=%d", w32);
+ } else if (flags & SRFL_PRHEX)
+ varbuf_append(b, "%s=0x%x", name, val);
+ else if ((flags & SRFL_PRSIGN)
+ && (val & (1 << (width - 1))))
+ varbuf_append(b, "%s=%d", name,
+ (int)(val | (~0 << width)));
+ else
+ varbuf_append(b, "%s=%u", name, val);
+ }
+ }
+
+ if (sromrev >= 4) {
+ /* Do per-path variables */
+ uint p, pb, psz;
+
+ if (sromrev >= 8) {
+ pb = SROM8_PATH0;
+ psz = SROM8_PATH1 - SROM8_PATH0;
+ } else {
+ pb = SROM4_PATH0;
+ psz = SROM4_PATH1 - SROM4_PATH0;
+ }
+
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars; srv->name != NULL;
+ srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (pb + srv->off < off)
+ continue;
+
+				/* This entry is for mfgc only. Don't generate a param for it. */
+ if (srv->flags & SRFL_NOVAR)
+ continue;
+
+ w = srom[pb + srv->off - off];
+
+ ASSERT(mask_valid(srv->mask));
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ /* Cheating: no per-path var is more than 1 word */
+
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (srv->flags & SRFL_PRHEX)
+ varbuf_append(b, "%s%d=0x%x", srv->name,
+ p, val);
+ else
+ varbuf_append(b, "%s%d=%d", srv->name,
+ p, val);
+ }
+ pb += psz;
+ }
+ }
+}
+
+/*
+ * Initialize nonvolatile variable table from sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_srom_pci) (si_t * sih, void *curmap, char **vars,
+ uint * count) {
+ uint16 *srom, *sromwindow;
+ uint8 sromrev = 0;
+ uint32 sr;
+ varbuf_t b;
+ char *vp, *base = NULL;
+ osl_t *osh = si_osh(sih);
+ bool flash = FALSE;
+ int err = 0;
+
+	/*
+	 * Apply the CRC over the SROM content regardless of whether an SROM
+	 * is present, and use the existence of the <devpath>sromrev variable
+	 * in flash to decide whether a CRC failure is an error or whether the
+	 * SROM variables should be read from flash instead.
+	 */
+ srom = MALLOC(osh, SROM_MAX);
+ ASSERT(srom != NULL);
+ if (!srom)
+ return -2;
+
+ sromwindow = (uint16 *) SROM_OFFSET(sih);
+ if (si_is_sprom_available(sih)) {
+ err =
+ sprom_read_pci(osh, sih, sromwindow, 0, srom, SROM_WORDS,
+ TRUE);
+
+ if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (((sih->buscoretype == PCIE_CORE_ID)
+ && (sih->buscorerev >= 6))
+ || ((sih->buscoretype == PCI_CORE_ID)
+ && (sih->buscorerev >= 0xe)))) {
+ /* sromrev >= 4, read more */
+ err =
+ sprom_read_pci(osh, sih, sromwindow, 0, srom,
+ SROM4_WORDS, TRUE);
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ if (err)
+ BS_ERROR(("%s: srom %d, bad crc\n", __func__,
+ sromrev));
+
+ } else if (err == 0) {
+ /* srom is good and is rev < 4 */
+ /* top word of sprom contains version and crc8 */
+ sromrev = srom[SROM_CRCREV] & 0xff;
+ /* bcm4401 sroms misprogrammed */
+ if (sromrev == 0x10)
+ sromrev = 1;
+ }
+ }
+#if defined(BCMNVRAMR)
+ /* Use OTP if SPROM not available */
+ else if ((err = otp_read_pci(osh, sih, srom, SROM_MAX)) == 0) {
+ /* OTP only contain SROM rev8/rev9 for now */
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ }
+#endif
+ else {
+ err = 1;
+ BS_ERROR(("Neither SPROM nor OTP has valid image\n"));
+ }
+
+ /* We want internal/wltest driver to come up with default sromvars so we can
+ * program a blank SPROM/OTP.
+ */
+ if (err) {
+ char *value;
+ uint32 val;
+ val = 0;
+
+ if ((value = si_getdevpathvar(sih, "sromrev"))) {
+ sromrev = (uint8) bcm_strtoul(value, NULL, 0);
+ flash = TRUE;
+ goto varscont;
+ }
+
+ BS_ERROR(("%s, SROM CRC Error\n", __func__));
+
+ if ((value = si_getnvramflvar(sih, "sromrev"))) {
+ err = 0;
+ goto errout;
+ }
+
+		err = -1;
+		goto errout;
+ }
+
+ varscont:
+ /* Bitmask for the sromrev */
+ sr = 1 << sromrev;
+
+ /* srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, 9 */
+ if ((sr & 0x33e) == 0) {
+ err = -2;
+ goto errout;
+ }
+
+ ASSERT(vars != NULL);
+ ASSERT(count != NULL);
+
+ base = vp = MALLOC(osh, MAXSZ_NVRAM_VARS);
+ ASSERT(vp != NULL);
+ if (!vp) {
+ err = -2;
+ goto errout;
+ }
+
+ /* read variables from flash */
+ if (flash) {
+ if ((err = initvars_flash(sih, osh, &vp, MAXSZ_NVRAM_VARS)))
+ goto errout;
+ goto varsdone;
+ }
+
+ varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
+
+ /* parse SROM into name=value pairs. */
+ _initvars_srom_pci(sromrev, srom, 0, &b);
+
+ /* final nullbyte terminator */
+ ASSERT(b.size >= 1);
+ vp = b.buf;
+ *vp++ = '\0';
+
+ ASSERT((vp - base) <= MAXSZ_NVRAM_VARS);
+
+ varsdone:
+ err = initvars_table(osh, base, vp, vars, count);
+
+ errout:
+ if (base)
+ MFREE(osh, base, MAXSZ_NVRAM_VARS);
+
+ MFREE(osh, srom, SROM_MAX);
+ return err;
+}
+
+#ifdef BCMSDIO
+/*
+ * Read the SDIO cis and call parsecis to initialize the vars.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+BCMATTACHFN(initvars_cis_sdio) (osl_t * osh, char **vars, uint * count) {
+ uint8 *cis[SBSDIO_NUM_FUNCTION + 1];
+ uint fn, numfn;
+ int rc = 0;
+
+ numfn = bcmsdh_query_iofnum(NULL);
+ ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
+
+ for (fn = 0; fn <= numfn; fn++) {
+ if ((cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT)) == NULL) {
+ rc = -1;
+ break;
+ }
+
+ bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ if (bcmsdh_cis_read(NULL, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT) !=
+ 0) {
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+ rc = -2;
+ break;
+ }
+ }
+
+ if (!rc)
+ rc = srom_parsecis(osh, cis, fn, vars, count);
+
+ while (fn-- > 0)
+ MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+
+ return (rc);
+}
+
+/* set SDIO sprom command register */
+static int BCMATTACHFN(sprom_cmd_sdio) (osl_t * osh, uint8 cmd) {
+ uint8 status = 0;
+ uint wait_cnt = 1000;
+
+ /* write sprom command register */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, cmd, NULL);
+
+ /* wait status */
+ while (wait_cnt--) {
+ status =
+ bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_CS, NULL);
+ if (status & SBSDIO_SPROM_DONE)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* read a word from the SDIO srom */
+static int sprom_read_sdio(osl_t * osh, uint16 addr, uint16 * data)
+{
+ uint8 addr_l, addr_h, data_l, data_h;
+
+ addr_l = (uint8) ((addr * 2) & 0xff);
+ addr_h = (uint8) (((addr * 2) >> 8) & 0xff);
+
+ /* set address */
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_HIGH, addr_h,
+ NULL);
+ bcmsdh_cfg_write(NULL, SDIO_FUNC_1, SBSDIO_SPROM_ADDR_LOW, addr_l,
+ NULL);
+
+ /* do read */
+ if (sprom_cmd_sdio(osh, SBSDIO_SPROM_READ))
+ return 1;
+
+ /* read data */
+ data_h =
+ bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_HIGH, NULL);
+ data_l =
+ bcmsdh_cfg_read(NULL, SDIO_FUNC_1, SBSDIO_SPROM_DATA_LOW, NULL);
+
+ *data = (data_h << 8) | data_l;
+ return 0;
+}
+#endif /* BCMSDIO */
+
+static int
+BCMATTACHFN(initvars_srom_si) (si_t * sih, osl_t * osh, void *curmap,
+ char **vars, uint * varsz) {
+ /* Search flash nvram section for srom variables */
+ return initvars_flash_si(sih, vars, varsz);
+}
diff --git a/drivers/staging/brcm80211/util/bcmutils.c b/drivers/staging/brcm80211/util/bcmutils.c
new file mode 100644
index 000000000000..364f837fb080
--- /dev/null
+++ b/drivers/staging/brcm80211/util/bcmutils.c
@@ -0,0 +1,1760 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmnvram.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#ifdef WLC_LOW
+/* nvram vars cache */
+static char *nvram_vars = NULL;
+static int vars_len = -1;
+#endif /* WLC_LOW */
+
+/* copy a pkt buffer chain into a buffer */
+uint pktcopy(osl_t * osh, void *p, uint offset, int len, uchar * buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(p)) {
+ if (offset < (uint) PKTLEN(p))
+ break;
+ offset -= PKTLEN(p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(p)) {
+ n = MIN((uint) PKTLEN(p) - offset, (uint) len);
+ bcopy(PKTDATA(p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* copy a buffer into a pkt buffer chain */
+uint pktfrombuf(osl_t * osh, void *p, uint offset, int len, uchar * buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(p)) {
+ if (offset < (uint) PKTLEN(p))
+ break;
+ offset -= PKTLEN(p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(p)) {
+ n = MIN((uint) PKTLEN(p) - offset, (uint) len);
+ bcopy(buf, PKTDATA(p) + offset, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* return total length of buffer chain */
+uint BCMFASTPATH pkttotlen(osl_t * osh, void *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = PKTNEXT(p))
+ total += PKTLEN(p);
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *pktlast(osl_t * osh, void *p)
+{
+ for (; PKTNEXT(p); p = PKTNEXT(p)) ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH pktsegcnt(osl_t * osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(p))
+ cnt++;
+
+ return cnt;
+}
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void *BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8) prec;
+
+ return p;
+}
+
+void *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8) prec;
+
+ return p;
+}
+
+void *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t * osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn,
+ int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn) (p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+}
+
+bool BCMFASTPATH pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq,
+ OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16) num_prec;
+
+ pq->max = (uint16) max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
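+
+/*
+ * Usage sketch (caller-side names are hypothetical): a queue owner
+ * typically sizes the queue once and then uses the enqueue/dequeue
+ * routines in this file, e.g.
+ *
+ *	struct pktq txq;
+ *	int prec;
+ *
+ *	pktq_init(&txq, 4, 256);	// 4 precedences, 256-pkt limit
+ *	pktq_penq(&txq, 3, pkt);	// append at tail of prec 3
+ *	pkt = pktq_deq(&txq, &prec);	// highest non-empty prec first
+ */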
+
+void *BCMFASTPATH pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *BCMFASTPATH pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void pktq_flush(osl_t * osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir, fn, arg);
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+const unsigned char bcm_ctype[] = {
+
+ _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C | _BCM_S, _BCM_C | _BCM_S, _BCM_C | _BCM_S,
+ _BCM_C | _BCM_S, _BCM_C | _BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, /* 16-23 */
+ _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, _BCM_C, /* 24-31 */
+ _BCM_S | _BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 32-39 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 40-47 */
+ _BCM_D, _BCM_D, _BCM_D, _BCM_D, _BCM_D, _BCM_D, _BCM_D, _BCM_D, /* 48-55 */
+ _BCM_D, _BCM_D, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U | _BCM_X, _BCM_U | _BCM_X, _BCM_U | _BCM_X,
+ _BCM_U | _BCM_X, _BCM_U | _BCM_X,
+ _BCM_U | _BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 72-79 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 80-87 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L | _BCM_X, _BCM_L | _BCM_X, _BCM_L | _BCM_X,
+ _BCM_L | _BCM_X, _BCM_L | _BCM_X,
+ _BCM_L | _BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 104-111 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 112-119 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S | _BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U,
+ _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L,
+ _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong BCMROMFN(bcm_strtoul) (char *cp, char **endp, uint base) {
+ ulong result, last_result = 0, value;
+ bool minus;
+
+ minus = FALSE;
+
+ while (bcm_isspace(*cp))
+ cp++;
+
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
+
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0')
+ && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
+
+ result = 0;
+
+ while (bcm_isxdigit(*cp) &&
+ (value =
+ bcm_isdigit(*cp) ? *cp - '0' : bcm_toupper(*cp) - 'A' + 10) <
+ base) {
+ result = result * base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus)
+ return (ulong) - 1;
+ last_result = result;
+ cp++;
+ }
+
+ if (minus)
+ result = (ulong) (-(long)result);
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return (result);
+}
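+
+/*
+ * Worked examples: with base 0 the prefix selects the radix, so
+ * bcm_strtoul("0x1f", NULL, 0) returns 31 and bcm_strtoul("017", NULL, 0)
+ * parses octal and returns 15, while bcm_strtoul("-5", NULL, 10)
+ * returns (ulong)-5.
+ */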
+
+int BCMROMFN(bcm_atoi) (char *s) {
+ return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *BCMROMFN(bcmstrstr) (char *haystack, char *needle) {
+ int len, nlen;
+ int i;
+
+ if ((haystack == NULL) || (needle == NULL))
+ return (haystack);
+
+ nlen = strlen(needle);
+ len = strlen(haystack) - nlen + 1;
+
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return (&haystack[i]);
+ return (NULL);
+}
+
+char *BCMROMFN(bcmstrcat) (char *dest, const char *src) {
+ char *p;
+
+ p = dest + strlen(dest);
+
+ while ((*p++ = *src++) != '\0') ;
+
+ return (dest);
+}
+
+char *BCMROMFN(bcmstrncat) (char *dest, const char *src, uint size) {
+ char *endp;
+ char *p;
+
+ p = dest + strlen(dest);
+ endp = p + size;
+
+ while (p != endp && (*p++ = *src++) != '\0') ;
+
+ return (dest);
+}
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but can be used on different strings or by different callers at the same
+* time. Each call modifies '*string' by substituting a '\0' character for the
+* first delimiter that is encountered, and updates 'string' to point to the
+* char after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
+
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
+ }
+
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
+
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
+
+ str = (unsigned char *)*string;
+
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
+
+ nextoken = (char *)str;
+
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
+ }
+ }
+
+ *string = (char *)str;
+
+ /* Determine if a token has been found. */
+ if (nextoken == (char *)str) {
+ return NULL;
+ } else {
+ return nextoken;
+ }
+}
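+
+/*
+ * Usage sketch (variable names are hypothetical):
+ *
+ *	char line[] = "en0,en1;wl0";
+ *	char *cursor = line, *tok, delim;
+ *
+ *	while ((tok = bcmstrtok(&cursor, ",;", &delim)) != NULL)
+ *		printf("token %s\n", tok);
+ *
+ * prints "en0", "en1" and "wl0"; the delimiters in 'line' are replaced
+ * with '\0' and 'delim' reports which delimiter ended each token.
+ */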
+
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare two strings case-insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc)
+ return -1;
+ if (dc > sc)
+ return 1;
+ s1++;
+ s2++;
+ }
+
+ if (*s1 && !*s2)
+ return 1;
+ if (!*s1 && *s2)
+ return -1;
+ return 0;
+}
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare two strings case-insensitively, up to a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, ignoring case.
+*****************************************************************************
+*/
+int bcmstrnicmp(const char *s1, const char *s2, int cnt)
+{
+ char dc, sc;
+
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc)
+ return -1;
+ if (dc > sc)
+ return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
+
+ if (!cnt)
+ return 0;
+ if (*s1 && !*s2)
+ return 1;
+ if (!*s1 && *s2)
+ return -1;
+ return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int BCMROMFN(bcm_ether_atoe) (char *p, struct ether_addr * ea) {
+ int i = 0;
+
+ for (;;) {
+ ea->octet[i++] = (char)bcm_strtoul(p, &p, 16);
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
+
+char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
+{
+ static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x";
+ snprintf(buf, 18, template,
+ ea->octet[0] & 0xff, ea->octet[1] & 0xff, ea->octet[2] & 0xff,
+ ea->octet[3] & 0xff, ea->octet[4] & 0xff, ea->octet[5] & 0xff);
+ return (buf);
+}
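+
+/*
+ * Example: bcm_ether_atoe("00:90:4c:c5:12:38", &ea) fills ea.octet[]
+ * with {0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38} and returns 1;
+ * bcm_ether_ntoa(&ea, buf) formats it back into the same 17-character
+ * string (buf must hold at least 18 bytes including the trailing '\0').
+ */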
+
+void bcm_mdelay(uint ms)
+{
+ uint i;
+
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
+}
+
+/*
+ * Search the name=value vars for a specific one and return its value.
+ * Returns NULL if not found.
+ */
+char *getvar(char *vars, const char *name)
+{
+ char *s;
+ int len;
+
+ if (!name)
+ return NULL;
+
+ len = strlen(name);
+ if (len == 0)
+ return NULL;
+
+ /* first look in vars[] */
+ for (s = vars; s && *s;) {
+ if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+ return (&s[len + 1]);
+
+ while (*s++) ;
+ }
+
+ /* then query nvram */
+ return (nvram_get(name));
+}
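+
+/*
+ * Illustration: 'vars' is a packed list of NUL-terminated "name=value"
+ * strings ending with an empty string, so getvar(vars, "boardtype")
+ * returns a pointer to the text right after "boardtype=", and
+ * getvar(NULL, name) skips the local search and goes straight to
+ * nvram_get().
+ */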
+
+/*
+ * Search the vars for a specific one and return its value as
+ * an integer. Returns 0 if not found.
+ */
+int getintvar(char *vars, const char *name)
+{
+ char *val;
+
+ if ((val = getvar(vars, name)) == NULL)
+ return (0);
+
+ return (bcm_strtoul(val, NULL, 0));
+}
+
+int getintvararray(char *vars, const char *name, uint8 index)
+{
+ char *buf, *endp;
+ int i = 0;
+ int val = 0;
+
+ if ((buf = getvar(vars, name)) == NULL) {
+ return (0);
+ }
+
+ /* table values are always separated by "," or " " */
+ while (*buf != '\0') {
+ val = bcm_strtoul(buf, &endp, 0);
+ if (i == index) {
+ return val;
+ }
+ buf = endp;
+ /* delimiter is ',' */
+ if (*buf == ',')
+ buf++;
+ i++;
+ }
+ return 0;
+}
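+
+/*
+ * Example (hypothetical nvram contents): with "pa0b=0x1496,0xfab0,0xfeb1"
+ * in 'vars', getintvararray(vars, "pa0b", 1) returns 0xfab0, and any
+ * index past the last element returns 0.
+ */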
+
+/* Search for token in comma separated token-string */
+static int findmatch(char *string, char *name)
+{
+ uint len;
+ char *c;
+
+ len = strlen(name);
+ while ((c = strchr(string, ',')) != NULL) {
+ if (len == (uint) (c - string) && !strncmp(string, name, len))
+ return 1;
+ string = c + 1;
+ }
+
+ return (!strcmp(string, name));
+}
+
+/* Return gpio pin number assigned to the named pin
+ *
+ * Variable should be in format:
+ *
+ * gpio<N>=pin_name,pin_name
+ *
+ * This format allows multiple features to share the gpio with mutual
+ * understanding.
+ *
+ * 'def_pin' is returned if a specific gpio is not defined for the requested functionality
+ * and if def_pin is not used by others.
+ */
+uint getgpiopin(char *vars, char *pin_name, uint def_pin)
+{
+ char name[] = "gpioXXXX";
+ char *val;
+ uint pin;
+
+ /* Go thru all possibilities till a match in pin name */
+ for (pin = 0; pin < GPIO_NUMPINS; pin++) {
+ snprintf(name, sizeof(name), "gpio%d", pin);
+ val = getvar(vars, name);
+ if (val && findmatch(val, pin_name))
+ return pin;
+ }
+
+ if (def_pin != GPIO_PIN_NOTDEFINED) {
+ /* make sure the default pin is not used by someone else */
+ snprintf(name, sizeof(name), "gpio%d", def_pin);
+ if (getvar(vars, name)) {
+ def_pin = GPIO_PIN_NOTDEFINED;
+ }
+ }
+ return def_pin;
+}
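+
+/*
+ * Example (hypothetical nvram contents): with "gpio2=wps_led" in 'vars',
+ * getgpiopin(vars, "wps_led", 5) returns 2. If no gpioN variable names
+ * wps_led, the default 5 is returned, unless some other feature already
+ * owns a "gpio5=" entry, in which case GPIO_PIN_NOTDEFINED is returned.
+ */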
+
+#if defined(BCMDBG)
+/* pretty hex print a pkt buffer chain */
+void prpkt(const char *msg, osl_t * osh, void *p0)
+{
+ void *p;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ for (p = p0; p; p = PKTNEXT(p))
+ prhex(NULL, PKTDATA(p), PKTLEN(p));
+}
+#endif /* defined(BCMDBG) */
+
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the error codes into related error strings */
+const char *bcmerrorstr(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr),
+ "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
+}
+
+#ifdef WLC_LOW
+static void BCMINITFN(bcm_nvram_refresh) (char *flash) {
+ int i;
+ int ret = 0;
+
+ ASSERT(flash != NULL);
+
+ /* default "empty" vars cache */
+ bzero(flash, 2);
+
+ if ((ret = nvram_getall(flash, NVRAM_SPACE)))
+ return;
+
+ /* determine nvram length */
+ for (i = 0; i < NVRAM_SPACE; i++) {
+ if (flash[i] == '\0' && flash[i + 1] == '\0')
+ break;
+ }
+
+ if (i > 1)
+ vars_len = i + 2;
+ else
+ vars_len = 0;
+}
+
+char *bcm_nvram_vars(uint * length)
+{
+#ifndef BCMNVRAMR
+ /* cache may be stale if nvram is read/write */
+ if (nvram_vars) {
+ ASSERT(!bcmreclaimed);
+ bcm_nvram_refresh(nvram_vars);
+ }
+#endif
+ if (length)
+ *length = vars_len;
+ return nvram_vars;
+}
+
+/* copy nvram vars into locally-allocated multi-string array */
+int BCMINITFN(bcm_nvram_cache) (void *sih) {
+ int ret = 0;
+ void *osh;
+ char *flash = NULL;
+
+ if (vars_len >= 0) {
+#ifndef BCMNVRAMR
+ bcm_nvram_refresh(nvram_vars);
+#endif
+ return 0;
+ }
+
+ osh = si_osh((si_t *) sih);
+
+ /* allocate memory and read in flash */
+ if (!(flash = MALLOC(osh, NVRAM_SPACE))) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+
+ bcm_nvram_refresh(flash);
+#ifdef BCMNVRAMR
+ if (vars_len > 3) {
+ /* copy into a properly-sized buffer */
+ if (!(nvram_vars = MALLOC(osh, vars_len))) {
+ ret = BCME_NOMEM;
+ } else
+ bcopy(flash, nvram_vars, vars_len);
+ }
+ MFREE(osh, flash, NVRAM_SPACE);
+#else
+ /* cache must be full size of nvram if read/write */
+ nvram_vars = flash;
+#endif /* BCMNVRAMR */
+
+ exit:
+ return ret;
+}
+#endif /* WLC_LOW */
+
+/* iovar table lookup */
+const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t * table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
+
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
+
+ ASSERT(table != NULL);
+
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
+ }
+ /* ran to end of table */
+
+ return NULL; /* var name not found */
+}
+
+int bcm_iovar_lencheck(const bcm_iovar_t * vi, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+uint8 BCMROMFN(hndcrc8) (uint8 * pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+ ) {
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
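+
+/* Illustrative usage sketch (kept out of the build with #if 0, not part of
+ * the driver): generate a crc8 over a message, complement it and append it,
+ * then verify the whole frame against CRC8_GOOD_VALUE as described above.
+ */
+#if 0
+static void hndcrc8_example(uint8 *msg, uint msglen)
+{
+	uint8 crc;
+
+	/* generate: run over the payload, complement, append */
+	crc = hndcrc8(msg, msglen, CRC8_INIT_VALUE);
+	msg[msglen] = ~crc;
+
+	/* check: run over the payload plus the appended crc byte */
+	crc = hndcrc8(msg, msglen + 1, CRC8_INIT_VALUE);
+	ASSERT(crc == CRC8_GOOD_VALUE);
+}
+#endif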
+
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 + x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
+
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
+
+uint16 BCMROMFN(hndcrc16) (uint8 * pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+ ) {
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
+}
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *BCMROMFN(bcm_next_tlv) (bcm_tlv_t * elt, int *buflen) {
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t *) (elt->data + len);
+ *buflen -= (2 + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *BCMROMFN(bcm_parse_tlvs) (void *buf, int buflen, uint key) {
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t *) buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t *) ((uint8 *) elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
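+
+/* Illustrative usage sketch (kept out of the build with #if 0): look up the
+ * SSID element in an 802.11 IE blob, assuming the DOT11_MNG_SSID_ID tag
+ * from the 802.11 protocol header.
+ */
+#if 0
+static void parse_tlvs_example(uint8 *ies, int ies_len)
+{
+	bcm_tlv_t *ssid_ie;
+
+	ssid_ie = bcm_parse_tlvs(ies, ies_len, DOT11_MNG_SSID_ID);
+	if (ssid_ie != NULL)
+		printf("ssid element, len %d\n", ssid_ie->len);
+}
+#endif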
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *BCMROMFN(bcm_parse_ordered_tlvs) (void *buf, int buflen, uint key) {
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t *) buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen */
+ if ((id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t *) ((uint8 *) elt + (len + 2));
+ totlen -= (len + 2);
+ }
+ return NULL;
+}
+
+#if defined(BCMDBG)
+int
+bcm_format_flags(const bcm_bit_desc_t * bd, uint32 flags, char *buf, int len)
+{
+ int i;
+ char *p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ uint32 bit;
+ const char *name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+		/* count the between-flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+		/* copy the between-flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ len -= slen;
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+
+/* print bytes formatted as hex to a string. return the resulting string length */
+int bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8 *)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+#endif /* defined(BCMDBG) */
+
+/* pretty hex print a contiguous buffer */
+void prhex(const char *msg, uchar * buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
+
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
+
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
+ }
+
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
+ }
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+	"NALG",
+	"UNDEF",
+ "UNDEF",
+ "UNDEF",
+ "UNDEF"
+};
+
+const char *bcm_crypto_algo_name(uint algo)
+{
+ return (algo <
+ ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+#ifdef BCMDBG
+void deadbeef(void *p, uint len)
+{
+ static uint8 meat[] = { 0xde, 0xad, 0xbe, 0xef };
+
+ while (len-- > 0) {
+ *(uint8 *) p = meat[((uintptr) p) & 3];
+ p = (uint8 *) p + 1;
+ }
+}
+#endif /* BCMDBG */
+
+char *bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *bcm_brev_str(uint32 brev, char *buf)
+{
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x",
+ ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+ return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void printbig(char *buf)
+{
+ uint len, max_len;
+ char c;
+
+ len = strlen(buf);
+
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1,
+ struct fielddesc * fielddesc_array, char *buf, uint32 bufsize)
+{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
+
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
+ break;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32) len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
+ }
+ return filled_len;
+}
+
+uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
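+
+/* Illustrative usage sketch (kept out of the build with #if 0): pack an
+ * integer iovar into an ioctl buffer; the iovar name "mpc" is only an
+ * example, any NUL-terminated name works the same way.
+ */
+#if 0
+static void mkiovar_example(void)
+{
+	char buf[32];
+	uint32 val = 1;
+	uint len;
+
+	/* buf holds "mpc\0" followed by the 4 data bytes; len is 4 + 4 = 8,
+	 * or 0 if buf had been too small.
+	 */
+	len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), buf, sizeof(buf));
+	printf("iovar buf len %u\n", len);
+}
+#endif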
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+uint16 BCMROMFN(bcm_qdbm_to_mw) (uint8 qdbm) {
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
+
+	/* scale the qdBm index up into the range of the table, 0-40,
+	 * where each offset of 40 qdBm (10 dB) corresponds to a factor of 10 in mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor / 2) / factor);
+}
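+
+/* Worked example for bcm_qdbm_to_mw(): qdbm = 60 (15 dBm) gives
+ * idx = 60 - 153 = -93; three passes of the loop leave idx = 27 and
+ * factor = 1000. The table entry for 153 + 27 = 180 qdBm is 31623, so the
+ * result is (31623 + 500) / 1000 = 32 mW, i.e. 10^(15/10) = 31.6 mW rounded.
+ */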
+
+uint8 BCMROMFN(bcm_mw_to_qdbm) (uint16 mw) {
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
+ nqdBm_to_mW_map[qdbm]) / 2;
+ if (mw_uint < boundary)
+ break;
+ }
+
+ qdbm += (uint8) offset;
+
+ return (qdbm);
+}
+
+uint BCMROMFN(bcm_bitcount) (uint8 * bitmap, uint length) {
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+
+/* Initialization of bcmstrbuf structure */
+void bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* A non-C99-compliant vsnprintf returns -1 on overflow,
+	 * a C99-compliant one returns r >= b->size,
+	 * and bcmstdlib returns 0; handle all three.
+ */
+ if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
+ b->size = 0;
+ } else {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
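+
+/* Illustrative usage sketch (kept out of the build with #if 0): bcm_binit()
+ * and bcm_bprintf() are meant to be used together when filling a fixed-size
+ * dump buffer without risking overflow.
+ */
+#if 0
+static void bprintf_example(char *out, uint out_len)
+{
+	struct bcmstrbuf b;
+
+	bcm_binit(&b, out, out_len);
+	bcm_bprintf(&b, "txin %d txout %d\n", 0, 0);
+	bcm_bprintf(&b, "rxin %d rxout %d\n", 0, 0);
+	/* once the buffer fills, b.size drops to 0 and later calls write nothing */
+}
+#endif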
+
+void bcm_inc_bytes(uchar * num, int num_bytes, uint8 amount)
+{
+ int i;
+
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
+
+int bcm_cmp_bytes(uchar * arg1, uchar * arg2, uint8 nbytes)
+{
+ int i;
+
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
+
+void bcm_print_bytes(char *name, const uchar * data, int len)
+{
+ int i;
+ int per_line = 0;
+
+ printf("%s: %d\n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
+ }
+ }
+ printf("\n");
+}
+
+#if defined(BCMDBG)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+int bcm_format_ssid(char *buf, const uchar ssid[], uint ssid_len)
+{
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
+
+ if (ssid_len > DOT11_MAX_SSID_LEN)
+ ssid_len = DOT11_MAX_SSID_LEN;
+
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint) ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar) c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
+ }
+ }
+ *p = '\0';
+ ASSERT(p < endp);
+
+ return (int)(p - buf);
+}
+#endif /* defined(BCMDBG) */
diff --git a/drivers/staging/brcm80211/util/bcmwifi.c b/drivers/staging/brcm80211/util/bcmwifi.c
new file mode 100644
index 000000000000..ae5ff88407a7
--- /dev/null
+++ b/drivers/staging/brcm80211/util/bcmwifi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+
+#include <osl.h>
+#include <bcmutils.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#include <bcmwifi.h>
+
+/* Chanspec ASCII representation:
+ * <channel><band><bandwidth><ctl-sideband>
+ * digit [AB] [N] [UL]
+ *
+ * <channel>: channel number of the 10MHz or 20MHz channel,
+ * or control sideband channel of 40MHz channel.
+ * <band>: A for 5GHz, B for 2.4GHz
+ * <bandwidth>: N for 10MHz, nothing for 20MHz or 40MHz
+ * (ctl-sideband spec implies 40MHz)
+ * <ctl-sideband>: U for upper, L for lower
+ *
+ * <band> may be omitted on input, and will be assumed to be
+ * 2.4GHz if channel number <= 14.
+ *
+ * Examples:
+ * 8 -> 2.4GHz channel 8, 20MHz
+ * 8b -> 2.4GHz channel 8, 20MHz
+ * 8l -> 2.4GHz channel 8, 40MHz, lower ctl sideband
+ * 8a -> 5GHz channel 8 (low 5 GHz band), 20MHz
+ * 36 -> 5GHz channel 36, 20MHz
+ * 36l -> 5GHz channel 36, 40MHz, lower ctl sideband
+ * 40u -> 5GHz channel 40, 40MHz, upper ctl sideband
+ * 180n -> channel 180, 10MHz
+ */
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original buffer pointer.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+ const char *band, *bw, *sb;
+ uint channel;
+
+ band = "";
+ bw = "";
+ sb = "";
+ channel = CHSPEC_CHANNEL(chspec);
+ /* check for non-default band spec */
+ if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
+ (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
+ band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
+ if (CHSPEC_IS40(chspec)) {
+ if (CHSPEC_SB_UPPER(chspec)) {
+ sb = "u";
+ channel += CH_10MHZ_APART;
+ } else {
+ sb = "l";
+ channel -= CH_10MHZ_APART;
+ }
+ } else if (CHSPEC_IS10(chspec)) {
+ bw = "n";
+ }
+
+ /* Outputs a max of 6 chars including '\0' */
+ snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
+ return (buf);
+}
+
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
+chanspec_t wf_chspec_aton(char *a)
+{
+ char *endp = NULL;
+ uint channel, band, bw, ctl_sb;
+ char c;
+
+ channel = strtoul(a, &endp, 10);
+
+ /* check for no digits parsed */
+ if (endp == a)
+ return 0;
+
+ if (channel > MAXCHANNEL)
+ return 0;
+
+ band =
+ ((channel <=
+ CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+
+ a = endp;
+
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+
+ /* parse the optional ['A' | 'B'] band spec */
+ if (c == 'a' || c == 'b') {
+ band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
+ a++;
+ c = tolower(a[0]);
+ if (c == '\0')
+ goto done;
+ }
+
+ /* parse bandwidth 'N' (10MHz) or 40MHz ctl sideband ['L' | 'U'] */
+ if (c == 'n') {
+ bw = WL_CHANSPEC_BW_10;
+ } else if (c == 'l') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
+ /* adjust channel to center of 40MHz band */
+ if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
+ channel += CH_10MHZ_APART;
+ else
+ return 0;
+ } else if (c == 'u') {
+ bw = WL_CHANSPEC_BW_40;
+ ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
+ /* adjust channel to center of 40MHz band */
+ if (channel > CH_20MHZ_APART)
+ channel -= CH_10MHZ_APART;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+
+ done:
+ return (channel | band | bw | ctl_sb);
+}
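+
+/* Illustrative round-trip sketch (kept out of the build with #if 0): "36l"
+ * parses to a 40MHz chanspec centred on channel 38 with the lower control
+ * sideband, and wf_chspec_ntoa() formats it back to the same string.
+ */
+#if 0
+static void chspec_example(void)
+{
+	char spec[] = "36l";
+	char buf[CHANSPEC_STR_LEN];
+	chanspec_t chspec = wf_chspec_aton(spec);
+
+	if (chspec != 0 && !wf_chspec_malformed(chspec))
+		printf("chanspec 0x%04x -> %s\n", chspec,
+		       wf_chspec_ntoa(chspec, buf));
+}
+#endif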
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool wf_chspec_malformed(chanspec_t chanspec)
+{
+ /* must be 2G or 5G band */
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return TRUE;
+ /* must be 20 or 40 bandwidth */
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return TRUE;
+
+ /* 20MHZ b/w must have no ctl sb, 40 must have a ctl sb */
+ if (CHSPEC_IS20(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return TRUE;
+ } else {
+ if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on. For legacy
+ * channels this is just the channel number; for 40MHZ channels it is the upper or lower 20MHZ
+ * sideband, depending on the chanspec selected.
+ */
+uint8 wf_chspec_ctlchan(chanspec_t chspec)
+{
+ uint8 ctl_chan;
+
+ /* Is there a sideband ? */
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+ /* we only support 40MHZ with sidebands */
+ ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
+ /* chanspec channel holds the centre frequency, use that and the
+ * side band information to reconstruct the control channel number
+ */
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+ /* control chan is the upper 20 MHZ SB of the 40MHZ channel */
+ ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ ASSERT(CHSPEC_CTL_SB(chspec) ==
+ WL_CHANSPEC_CTL_SB_LOWER);
+ /* control chan is the lower 20 MHZ SB of the 40MHZ channel */
+ ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ }
+
+ return ctl_chan;
+}
+
+chanspec_t wf_chspec_ctlchspec(chanspec_t chspec)
+{
+ chanspec_t ctl_chspec = 0;
+ uint8 channel;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+
+ /* Is there a sideband ? */
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return chspec;
+ } else {
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
+ channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
+ } else {
+ channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
+ }
+ ctl_chspec =
+ channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+ ctl_chspec |= CHSPEC_BAND(chspec);
+ }
+ return ctl_chspec;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int wf_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+ /* take the default channel start frequency */
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+ /* check that the frequency is in 1GHz range of the base */
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+ /* check that frequency is a 5MHz multiple from the base */
+ if (offset != (ch * 5))
+ return -1;
+
+ /* restricted channel range check for 2.4G */
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
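+
+/* Worked examples for wf_mhz2channel(): 2412 MHz with WF_CHAN_FACTOR_2_4_G
+ * (4814) gives base = 2407, offset = 5 and channel 1; 5180 MHz with
+ * WF_CHAN_FACTOR_5_G (2 * 5000) gives base = 5000, offset = 180 and
+ * channel 36.
+ */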
+
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int wf_channel2mhz(uint ch, uint start_factor)
+{
+ int freq;
+
+ if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+ (ch > 200))
+ freq = -1;
+ else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+ freq = 2484;
+ else
+ freq = ch * 5 + start_factor / 2;
+
+ return freq;
+}
diff --git a/drivers/staging/brcm80211/util/bcmwpa.c b/drivers/staging/brcm80211/util/bcmwpa.c
new file mode 100644
index 000000000000..d1b7c8d1c326
--- /dev/null
+++ b/drivers/staging/brcm80211/util/bcmwpa.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <bcmutils.h>
+#include <bcmwpa.h>
+
+/* Is the body of this tlvs entry a WFA entry? If
+ * not, update the tlvs buffer pointer/length.
+ */
+bool bcm_is_wfa_ie(uint8 * ie, uint8 ** tlvs, uint * tlvs_len, uint8 type)
+{
+ /* If the contents match the WFA_OUI and type */
+ if ((ie[TLV_LEN_OFF] > (WFA_OUI_LEN + 1)) &&
+ !bcmp(&ie[TLV_BODY_OFF], WFA_OUI, WFA_OUI_LEN) &&
+ type == ie[TLV_BODY_OFF + WFA_OUI_LEN]) {
+ return TRUE;
+ }
+
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return FALSE;
+}
+
+wpa_ie_fixed_t *BCMROMFN(bcm_find_wpaie) (uint8 * parse, uint len) {
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
+ if (bcm_is_wpa_ie((uint8 *) ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *) ie;
+ }
+ }
+ return NULL;
+}
diff --git a/drivers/staging/brcm80211/util/hnddma.c b/drivers/staging/brcm80211/util/hnddma.c
new file mode 100644
index 000000000000..5b59ad828141
--- /dev/null
+++ b/drivers/staging/brcm80211/util/hnddma.c
@@ -0,0 +1,2689 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <bcmdevs.h>
+#include <osl.h>
+#include <bcmendian.h>
+#include <hndsoc.h>
+#include <bcmutils.h>
+#include <siutils.h>
+
+#include <sbhnddma.h>
+#include <hnddma.h>
+
+/* debug/trace */
+#ifdef BCMDBG
+#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args
+#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args
+#else
+#define DMA_ERROR(args)
+#define DMA_TRACE(args)
+#endif /* BCMDBG */
+
+#define DMA_NONE(args)
+
+#define d32txregs dregs.d32_u.txregs_32
+#define d32rxregs dregs.d32_u.rxregs_32
+#define txd32 dregs.d32_u.txd_32
+#define rxd32 dregs.d32_u.rxd_32
+
+#define d64txregs dregs.d64_u.txregs_64
+#define d64rxregs dregs.d64_u.rxregs_64
+#define txd64 dregs.d64_u.txd_64
+#define rxd64 dregs.d64_u.rxd_64
+
+/* default dma message level (if input msg_level pointer is null in dma_attach()) */
+static uint dma_msg_level = 0;
+
+#define MAXNAMEL 8 /* 8 char names */
+
+#define DI_INFO(dmah) ((dma_info_t *)dmah)
+
+/* dma engine software state */
+typedef struct dma_info {
+ struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
+ * which could be const
+ */
+ uint *msg_level; /* message level pointer */
+ char name[MAXNAMEL]; /* callers name for diag msgs */
+
+ void *osh; /* os handle */
+ si_t *sih; /* sb handle */
+
+ bool dma64; /* this dma engine is operating in 64-bit mode */
+ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
+
+ union {
+ struct {
+ dma32regs_t *txregs_32; /* 32-bit dma tx engine registers */
+ dma32regs_t *rxregs_32; /* 32-bit dma rx engine registers */
+ dma32dd_t *txd_32; /* pointer to dma32 tx descriptor ring */
+ dma32dd_t *rxd_32; /* pointer to dma32 rx descriptor ring */
+ } d32_u;
+ struct {
+ dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
+ dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
+ dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
+ dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
+ } d64_u;
+ } dregs;
+
+ uint16 dmadesc_align; /* alignment requirement for dma descriptors */
+
+ uint16 ntxd; /* # tx descriptors tunable */
+ uint16 txin; /* index of next descriptor to reclaim */
+ uint16 txout; /* index of next descriptor to post */
+ void **txp; /* pointer to parallel array of pointers to packets */
+ osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
+ hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
+ dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
+ dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
+ uint16 txdalign; /* #bytes added to alloc'd mem to align txd */
+ uint32 txdalloc; /* #bytes allocated for the ring */
+ uint32 xmtptrbase; /* When using unaligned descriptors, the ptr register
+ * is not just an index, it needs all 13 bits to be
+ * an offset from the addr register.
+ */
+
+ uint16 nrxd; /* # rx descriptors tunable */
+ uint16 rxin; /* index of next descriptor to reclaim */
+ uint16 rxout; /* index of next descriptor to post */
+ void **rxp; /* pointer to parallel array of pointers to packets */
+ osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
+ hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
+ dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
+ dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
+ uint16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
+ uint32 rxdalloc; /* #bytes allocated for the ring */
+ uint32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
+
+ /* tunables */
+ uint16 rxbufsize; /* rx buffer size in bytes,
+ * not including the extra headroom
+ */
+	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
+ * e.g. some rx pkt buffers will be bridged to tx side
+ * without byte copying. The extra headroom needs to be
+ * large enough to fit txheader needs.
+ * Some dongle driver may not need it.
+ */
+ uint nrxpost; /* # rx buffers to keep posted */
+ uint rxoffset; /* rxcontrol offset */
+ uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
+ uint ddoffsethigh; /* high 32 bits */
+ uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
+ uint dataoffsethigh; /* high 32 bits */
+ bool aligndesc_4k; /* descriptor base need to be aligned or not */
+} dma_info_t;
+
+/*
+ * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
+ * Otherwise it will support only 64-bit.
+ *
+ * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
+ * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
+ *
+ * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
+ */
+#ifdef BCMDMA32
+#define DMA32_ENAB(di) 1
+#define DMA64_ENAB(di) 1
+#define DMA64_MODE(di) ((di)->dma64)
+#else /* !BCMDMA32 */
+#define DMA32_ENAB(di) 0
+#define DMA64_ENAB(di) 1
+#define DMA64_MODE(di) 1
+#endif /* !BCMDMA32 */
+
+/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
+#ifdef BCMDMASGLISTOSL
+#define DMASGLIST_ENAB TRUE
+#else
+#define DMASGLIST_ENAB FALSE
+#endif /* BCMDMASGLISTOSL */
+
+/* descriptor bumping macros */
+#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
+#define TXD(x) XXD((x), di->ntxd)
+#define RXD(x) XXD((x), di->nrxd)
+#define NEXTTXD(i) TXD((i) + 1)
+#define PREVTXD(i) TXD((i) - 1)
+#define NEXTRXD(i) RXD((i) + 1)
+#define PREVRXD(i) RXD((i) - 1)
+
+#define NTXDACTIVE(h, t) TXD((t) - (h))
+#define NRXDACTIVE(h, t) RXD((t) - (h))
+
+/* macros to convert between byte offsets and indexes */
+#define B2I(bytes, type) ((bytes) / sizeof(type))
+#define I2B(index, type) ((index) * sizeof(type))
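+
+/* Example: with di->ntxd = 8, TXD(9) = 1, PREVTXD(0) = 7 and
+ * NTXDACTIVE(6, 2) = TXD(2 - 6) = 4; the bitwise-AND trick is why ntxd and
+ * nrxd must be powers of 2 (see the ISPOWEROF2 asserts in dma_attach).
+ */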
+
+#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
+#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
+
+#define PCI64ADDR_HIGH 0x80000000 /* address[63] */
+#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
+
+/* Common prototypes */
+static bool _dma_isaddrext(dma_info_t * di);
+static bool _dma_descriptor_align(dma_info_t * di);
+static bool _dma_alloc(dma_info_t * di, uint direction);
+static void _dma_detach(dma_info_t * di);
+static void _dma_ddtable_init(dma_info_t * di, uint direction, dmaaddr_t pa);
+static void _dma_rxinit(dma_info_t * di);
+static void *_dma_rx(dma_info_t * di);
+static bool _dma_rxfill(dma_info_t * di);
+static void _dma_rxreclaim(dma_info_t * di);
+static void _dma_rxenable(dma_info_t * di);
+static void *_dma_getnextrxp(dma_info_t * di, bool forceall);
+static void _dma_rx_param_get(dma_info_t * di, uint16 * rxoffset,
+ uint16 * rxbufsize);
+
+static void _dma_txblock(dma_info_t * di);
+static void _dma_txunblock(dma_info_t * di);
+static uint _dma_txactive(dma_info_t * di);
+static uint _dma_rxactive(dma_info_t * di);
+static uint _dma_txpending(dma_info_t * di);
+static uint _dma_txcommitted(dma_info_t * di);
+
+static void *_dma_peeknexttxp(dma_info_t * di);
+static void *_dma_peeknextrxp(dma_info_t * di);
+static uintptr _dma_getvar(dma_info_t * di, const char *name);
+static void _dma_counterreset(dma_info_t * di);
+static void _dma_fifoloopbackenable(dma_info_t * di);
+static uint _dma_ctrlflags(dma_info_t * di, uint mask, uint flags);
+static uint8 dma_align_sizetobits(uint size);
+static void *dma_ringalloc(osl_t * osh, uint32 boundary, uint size,
+ uint16 * alignbits, uint * alloced,
+ dmaaddr_t * descpa, osldma_t ** dmah);
+
+/* Prototypes for 32-bit routines */
+static bool dma32_alloc(dma_info_t * di, uint direction);
+static bool dma32_txreset(dma_info_t * di);
+static bool dma32_rxreset(dma_info_t * di);
+static bool dma32_txsuspendedidle(dma_info_t * di);
+static int dma32_txfast(dma_info_t * di, void *p0, bool commit);
+static void *dma32_getnexttxp(dma_info_t * di, txd_range_t range);
+static void *dma32_getnextrxp(dma_info_t * di, bool forceall);
+static void dma32_txrotate(dma_info_t * di);
+static bool dma32_rxidle(dma_info_t * di);
+static void dma32_txinit(dma_info_t * di);
+static bool dma32_txenabled(dma_info_t * di);
+static void dma32_txsuspend(dma_info_t * di);
+static void dma32_txresume(dma_info_t * di);
+static bool dma32_txsuspended(dma_info_t * di);
+static void dma32_txreclaim(dma_info_t * di, txd_range_t range);
+static bool dma32_txstopped(dma_info_t * di);
+static bool dma32_rxstopped(dma_info_t * di);
+static bool dma32_rxenabled(dma_info_t * di);
+
+static bool _dma32_addrext(osl_t * osh, dma32regs_t * dma32regs);
+
+/* Prototypes for 64-bit routines */
+static bool dma64_alloc(dma_info_t * di, uint direction);
+static bool dma64_txreset(dma_info_t * di);
+static bool dma64_rxreset(dma_info_t * di);
+static bool dma64_txsuspendedidle(dma_info_t * di);
+static int dma64_txfast(dma_info_t * di, void *p0, bool commit);
+static int dma64_txunframed(dma_info_t * di, void *p0, uint len, bool commit);
+static void *dma64_getpos(dma_info_t * di, bool direction);
+static void *dma64_getnexttxp(dma_info_t * di, txd_range_t range);
+static void *dma64_getnextrxp(dma_info_t * di, bool forceall);
+static void dma64_txrotate(dma_info_t * di);
+
+static bool dma64_rxidle(dma_info_t * di);
+static void dma64_txinit(dma_info_t * di);
+static bool dma64_txenabled(dma_info_t * di);
+static void dma64_txsuspend(dma_info_t * di);
+static void dma64_txresume(dma_info_t * di);
+static bool dma64_txsuspended(dma_info_t * di);
+static void dma64_txreclaim(dma_info_t * di, txd_range_t range);
+static bool dma64_txstopped(dma_info_t * di);
+static bool dma64_rxstopped(dma_info_t * di);
+static bool dma64_rxenabled(dma_info_t * di);
+static bool _dma64_addrext(osl_t * osh, dma64regs_t * dma64regs);
+
+STATIC INLINE uint32 parity32(uint32 data);
+
+const di_fcn_t dma64proc = {
+ (di_detach_t) _dma_detach,
+ (di_txinit_t) dma64_txinit,
+ (di_txreset_t) dma64_txreset,
+ (di_txenabled_t) dma64_txenabled,
+ (di_txsuspend_t) dma64_txsuspend,
+ (di_txresume_t) dma64_txresume,
+ (di_txsuspended_t) dma64_txsuspended,
+ (di_txsuspendedidle_t) dma64_txsuspendedidle,
+ (di_txfast_t) dma64_txfast,
+ (di_txunframed_t) dma64_txunframed,
+ (di_getpos_t) dma64_getpos,
+ (di_txstopped_t) dma64_txstopped,
+ (di_txreclaim_t) dma64_txreclaim,
+ (di_getnexttxp_t) dma64_getnexttxp,
+ (di_peeknexttxp_t) _dma_peeknexttxp,
+ (di_txblock_t) _dma_txblock,
+ (di_txunblock_t) _dma_txunblock,
+ (di_txactive_t) _dma_txactive,
+ (di_txrotate_t) dma64_txrotate,
+
+ (di_rxinit_t) _dma_rxinit,
+ (di_rxreset_t) dma64_rxreset,
+ (di_rxidle_t) dma64_rxidle,
+ (di_rxstopped_t) dma64_rxstopped,
+ (di_rxenable_t) _dma_rxenable,
+ (di_rxenabled_t) dma64_rxenabled,
+ (di_rx_t) _dma_rx,
+ (di_rxfill_t) _dma_rxfill,
+ (di_rxreclaim_t) _dma_rxreclaim,
+ (di_getnextrxp_t) _dma_getnextrxp,
+ (di_peeknextrxp_t) _dma_peeknextrxp,
+ (di_rxparam_get_t) _dma_rx_param_get,
+
+ (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
+ (di_getvar_t) _dma_getvar,
+ (di_counterreset_t) _dma_counterreset,
+ (di_ctrlflags_t) _dma_ctrlflags,
+ NULL,
+ NULL,
+ NULL,
+ (di_rxactive_t) _dma_rxactive,
+ (di_txpending_t) _dma_txpending,
+ (di_txcommitted_t) _dma_txcommitted,
+ 39
+};
+
+static const di_fcn_t dma32proc = {
+ (di_detach_t) _dma_detach,
+ (di_txinit_t) dma32_txinit,
+ (di_txreset_t) dma32_txreset,
+ (di_txenabled_t) dma32_txenabled,
+ (di_txsuspend_t) dma32_txsuspend,
+ (di_txresume_t) dma32_txresume,
+ (di_txsuspended_t) dma32_txsuspended,
+ (di_txsuspendedidle_t) dma32_txsuspendedidle,
+ (di_txfast_t) dma32_txfast,
+ NULL,
+ NULL,
+ (di_txstopped_t) dma32_txstopped,
+ (di_txreclaim_t) dma32_txreclaim,
+ (di_getnexttxp_t) dma32_getnexttxp,
+ (di_peeknexttxp_t) _dma_peeknexttxp,
+ (di_txblock_t) _dma_txblock,
+ (di_txunblock_t) _dma_txunblock,
+ (di_txactive_t) _dma_txactive,
+ (di_txrotate_t) dma32_txrotate,
+
+ (di_rxinit_t) _dma_rxinit,
+ (di_rxreset_t) dma32_rxreset,
+ (di_rxidle_t) dma32_rxidle,
+ (di_rxstopped_t) dma32_rxstopped,
+ (di_rxenable_t) _dma_rxenable,
+ (di_rxenabled_t) dma32_rxenabled,
+ (di_rx_t) _dma_rx,
+ (di_rxfill_t) _dma_rxfill,
+ (di_rxreclaim_t) _dma_rxreclaim,
+ (di_getnextrxp_t) _dma_getnextrxp,
+ (di_peeknextrxp_t) _dma_peeknextrxp,
+ (di_rxparam_get_t) _dma_rx_param_get,
+
+ (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
+ (di_getvar_t) _dma_getvar,
+ (di_counterreset_t) _dma_counterreset,
+ (di_ctrlflags_t) _dma_ctrlflags,
+ NULL,
+ NULL,
+ NULL,
+ (di_rxactive_t) _dma_rxactive,
+ (di_txpending_t) _dma_txpending,
+ (di_txcommitted_t) _dma_txcommitted,
+ 39
+};
+
+hnddma_t *dma_attach(osl_t * osh, char *name, si_t * sih, void *dmaregstx,
+ void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
+ int rxextheadroom, uint nrxpost, uint rxoffset,
+ uint * msg_level)
+{
+ dma_info_t *di;
+ uint size;
+
+ /* allocate private info structure */
+ if ((di = MALLOC(osh, sizeof(dma_info_t))) == NULL) {
+#ifdef BCMDBG
+ printf("dma_attach: out of memory, malloced %d bytes\n",
+ MALLOCED(osh));
+#endif
+ return (NULL);
+ }
+
+ bzero((char *)di, sizeof(dma_info_t));
+
+ di->msg_level = msg_level ? msg_level : &dma_msg_level;
+
+ /* old chips w/o sb is no longer supported */
+ ASSERT(sih != NULL);
+
+ if (DMA64_ENAB(di))
+ di->dma64 =
+ ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+ else
+ di->dma64 = 0;
+
+ /* check arguments */
+ ASSERT(ISPOWEROF2(ntxd));
+ ASSERT(ISPOWEROF2(nrxd));
+
+ if (nrxd == 0)
+ ASSERT(dmaregsrx == NULL);
+ if (ntxd == 0)
+ ASSERT(dmaregstx == NULL);
+
+ /* init dma reg pointer */
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ ASSERT(ntxd <= D64MAXDD);
+ ASSERT(nrxd <= D64MAXDD);
+ di->d64txregs = (dma64regs_t *) dmaregstx;
+ di->d64rxregs = (dma64regs_t *) dmaregsrx;
+ di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
+ } else if (DMA32_ENAB(di)) {
+ ASSERT(ntxd <= D32MAXDD);
+ ASSERT(nrxd <= D32MAXDD);
+ di->d32txregs = (dma32regs_t *) dmaregstx;
+ di->d32rxregs = (dma32regs_t *) dmaregsrx;
+ di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
+ } else {
+ DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
+ ASSERT(0);
+ goto fail;
+ }
+
+	/* Default flags (which can be changed by the driver calling dma_ctrlflags
+	 * before enable): for backwards compatibility both Rx Overflow Continue
+	 * and Parity are DISABLED.
+ */
+ di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
+ 0);
+
+ DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+
+ /* make a private copy of our callers name */
+ strncpy(di->name, name, MAXNAMEL);
+ di->name[MAXNAMEL - 1] = '\0';
+
+ di->osh = osh;
+ di->sih = sih;
+
+ /* save tunables */
+ di->ntxd = (uint16) ntxd;
+ di->nrxd = (uint16) nrxd;
+
+ /* the actual dma size doesn't include the extra headroom */
+ di->rxextrahdrroom =
+ (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
+ if (rxbufsize > BCMEXTRAHDROOM)
+ di->rxbufsize = (uint16) (rxbufsize - di->rxextrahdrroom);
+ else
+ di->rxbufsize = (uint16) rxbufsize;
+
+ di->nrxpost = (uint16) nrxpost;
+ di->rxoffset = (uint8) rxoffset;
+
+ /*
+ * figure out the DMA physical address offset for dd and data
+	 * PCI/PCIE: they map silicon backplane address to zero based memory, need offset
+ * Other bus: use zero
+ * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
+ */
+ di->ddoffsetlow = 0;
+ di->dataoffsetlow = 0;
+ /* for pci bus, add offset */
+ if (sih->bustype == PCI_BUS) {
+ if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
+ /* pcie with DMA64 */
+ di->ddoffsetlow = 0;
+ di->ddoffsethigh = SI_PCIE_DMA_H32;
+ } else {
+ /* pci(DMA32/DMA64) or pcie with DMA32 */
+ di->ddoffsetlow = SI_PCI_DMA;
+ di->ddoffsethigh = 0;
+ }
+ di->dataoffsetlow = di->ddoffsetlow;
+ di->dataoffsethigh = di->ddoffsethigh;
+ }
+#if defined(__mips__) && defined(IL_BIGENDIAN)
+ di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
+#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
+ /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
+ if ((si_coreid(sih) == SDIOD_CORE_ID)
+ && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
+ di->addrext = 0;
+ else if ((si_coreid(sih) == I2S_CORE_ID) &&
+ ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
+ di->addrext = 0;
+ else
+ di->addrext = _dma_isaddrext(di);
+
+	/* do the descriptors need to be aligned, and if yes, on 4K/8K boundaries or not */
+ di->aligndesc_4k = _dma_descriptor_align(di);
+ if (di->aligndesc_4k) {
+ if (DMA64_MODE(di)) {
+ di->dmadesc_align = D64RINGALIGN_BITS;
+ if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
+ /* for smaller dd table, HW relax the alignment requirement */
+ di->dmadesc_align = D64RINGALIGN_BITS - 1;
+ }
+ } else
+ di->dmadesc_align = D32RINGALIGN_BITS;
+ } else
+ di->dmadesc_align = 4; /* 16 byte alignment */
+
+ DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align));
+
+ /* allocate tx packet pointer vector */
+ if (ntxd) {
+ size = ntxd * sizeof(void *);
+ if ((di->txp = MALLOC(osh, size)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
+ goto fail;
+ }
+ bzero((char *)di->txp, size);
+ }
+
+ /* allocate rx packet pointer vector */
+ if (nrxd) {
+ size = nrxd * sizeof(void *);
+ if ((di->rxp = MALLOC(osh, size)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n", di->name, MALLOCED(osh)));
+ goto fail;
+ }
+ bzero((char *)di->rxp, size);
+ }
+
+ /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
+ if (ntxd) {
+ if (!_dma_alloc(di, DMA_TX))
+ goto fail;
+ }
+
+ /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
+ if (nrxd) {
+ if (!_dma_alloc(di, DMA_RX))
+ goto fail;
+ }
+
+ if ((di->ddoffsetlow != 0) && !di->addrext) {
+ if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
+ DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->txdpa)));
+ goto fail;
+ }
+ if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
+ DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (uint32) PHYSADDRLO(di->rxdpa)));
+ goto fail;
+ }
+ }
+
+ DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
+
+ /* allocate DMA mapping vectors */
+ if (DMASGLIST_ENAB) {
+ if (ntxd) {
+ size = ntxd * sizeof(hnddma_seg_map_t);
+ if ((di->txp_dmah =
+ (hnddma_seg_map_t *) MALLOC(osh, size)) == NULL)
+ goto fail;
+ bzero((char *)di->txp_dmah, size);
+ }
+
+ if (nrxd) {
+ size = nrxd * sizeof(hnddma_seg_map_t);
+ if ((di->rxp_dmah =
+ (hnddma_seg_map_t *) MALLOC(osh, size)) == NULL)
+ goto fail;
+ bzero((char *)di->rxp_dmah, size);
+ }
+ }
+
+ return ((hnddma_t *) di);
+
+ fail:
+ _dma_detach(di);
+ return (NULL);
+}
+
+/* init the tx or rx descriptor */
+static INLINE void
+dma32_dd_upd(dma_info_t * di, dma32dd_t * ddring, dmaaddr_t pa, uint outidx,
+ uint32 * flags, uint32 bufcount)
+{
+ /* dma32 uses 32-bit control to fit both flags and bufcounter */
+ *flags = *flags | (bufcount & CTRL_BC_MASK);
+
+ if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
+ W_SM(&ddring[outidx].addr,
+ BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
+ } else {
+ /* address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
+
+ *flags |= (ae << CTRL_AE_SHIFT);
+ W_SM(&ddring[outidx].addr,
+ BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
+ }
+}
+
+/* Check for odd number of 1's */
+STATIC INLINE uint32 parity32(uint32 data)
+{
+ data ^= data >> 16;
+ data ^= data >> 8;
+ data ^= data >> 4;
+ data ^= data >> 2;
+ data ^= data >> 1;
+
+ return (data & 1);
+}
+
+#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
+
+static INLINE void
+dma64_dd_upd(dma_info_t * di, dma64dd_t * ddring, dmaaddr_t pa, uint outidx,
+ uint32 * flags, uint32 bufcount)
+{
+ uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
+
+ /* PCI bus with big(>1G) physical address, use address extension */
+#if defined(__mips__) && defined(IL_BIGENDIAN)
+ if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
+ || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
+#else
+ if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
+#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
+ ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);
+
+ W_SM(&ddring[outidx].addrlow,
+ BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
+ W_SM(&ddring[outidx].addrhigh,
+ BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
+ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
+ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
+ } else {
+ /* address extension for 32-bit PCI */
+ uint32 ae;
+ ASSERT(di->addrext);
+
+ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
+ ASSERT(PHYSADDRHI(pa) == 0);
+
+ ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
+ W_SM(&ddring[outidx].addrlow,
+ BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
+ W_SM(&ddring[outidx].addrhigh,
+ BUS_SWAP32(0 + di->dataoffsethigh));
+ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
+ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
+ }
+ if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
+ if (DMA64_DD_PARITY(&ddring[outidx])) {
+ W_SM(&ddring[outidx].ctrl2,
+ BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
+ }
+ }
+}
+
+static bool _dma32_addrext(osl_t * osh, dma32regs_t * dma32regs)
+{
+ uint32 w;
+
+ OR_REG(osh, &dma32regs->control, XC_AE);
+ w = R_REG(osh, &dma32regs->control);
+ AND_REG(osh, &dma32regs->control, ~XC_AE);
+ return ((w & XC_AE) == XC_AE);
+}
+
+static bool _dma_alloc(dma_info_t * di, uint direction)
+{
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ return dma64_alloc(di, direction);
+ } else if (DMA32_ENAB(di)) {
+ return dma32_alloc(di, direction);
+ } else
+ ASSERT(0);
+}
+
+/* !! may be called with core in reset */
+static void _dma_detach(dma_info_t * di)
+{
+
+ DMA_TRACE(("%s: dma_detach\n", di->name));
+
+ /* shouldn't be here if descriptors are unreclaimed */
+ ASSERT(di->txin == di->txout);
+ ASSERT(di->rxin == di->rxout);
+
+ /* free dma descriptor rings */
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ if (di->txd64)
+ DMA_FREE_CONSISTENT(di->osh,
+ ((int8 *) (uintptr) di->txd64 -
+ di->txdalign), di->txdalloc,
+ (di->txdpaorig), &di->tx_dmah);
+ if (di->rxd64)
+ DMA_FREE_CONSISTENT(di->osh,
+ ((int8 *) (uintptr) di->rxd64 -
+ di->rxdalign), di->rxdalloc,
+ (di->rxdpaorig), &di->rx_dmah);
+ } else if (DMA32_ENAB(di)) {
+ if (di->txd32)
+ DMA_FREE_CONSISTENT(di->osh,
+ ((int8 *) (uintptr) di->txd32 -
+ di->txdalign), di->txdalloc,
+ (di->txdpaorig), &di->tx_dmah);
+ if (di->rxd32)
+ DMA_FREE_CONSISTENT(di->osh,
+ ((int8 *) (uintptr) di->rxd32 -
+ di->rxdalign), di->rxdalloc,
+ (di->rxdpaorig), &di->rx_dmah);
+ } else
+ ASSERT(0);
+
+ /* free packet pointer vectors */
+ if (di->txp)
+ MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
+ if (di->rxp)
+ MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));
+
+ /* free tx packet DMA handles */
+ if (di->txp_dmah)
+ MFREE(di->osh, (void *)di->txp_dmah,
+ di->ntxd * sizeof(hnddma_seg_map_t));
+
+ /* free rx packet DMA handles */
+ if (di->rxp_dmah)
+ MFREE(di->osh, (void *)di->rxp_dmah,
+ di->nrxd * sizeof(hnddma_seg_map_t));
+
+ /* free our private info structure */
+ MFREE(di->osh, (void *)di, sizeof(dma_info_t));
+
+}
+
+static bool _dma_descriptor_align(dma_info_t * di)
+{
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ uint32 addrl;
+
+ /* Check to see if the descriptors need to be aligned on 4K/8K or not */
+ if (di->d64txregs != NULL) {
+ W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
+ addrl = R_REG(di->osh, &di->d64txregs->addrlow);
+ if (addrl != 0)
+ return FALSE;
+ } else if (di->d64rxregs != NULL) {
+ W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
+ addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
+ if (addrl != 0)
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
+static bool _dma_isaddrext(dma_info_t * di)
+{
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
+
+ /* not all tx or rx channels are available */
+ if (di->d64txregs != NULL) {
+ if (!_dma64_addrext(di->osh, di->d64txregs)) {
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
+ ASSERT(0);
+ }
+ return TRUE;
+ } else if (di->d64rxregs != NULL) {
+ if (!_dma64_addrext(di->osh, di->d64rxregs)) {
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
+ ASSERT(0);
+ }
+ return TRUE;
+ }
+ return FALSE;
+ } else if (DMA32_ENAB(di)) {
+ if (di->d32txregs)
+ return (_dma32_addrext(di->osh, di->d32txregs));
+ else if (di->d32rxregs)
+ return (_dma32_addrext(di->osh, di->d32rxregs));
+ } else
+ ASSERT(0);
+
+ return FALSE;
+}
+
+/* initialize descriptor table base address */
+static void _dma_ddtable_init(dma_info_t * di, uint direction, dmaaddr_t pa)
+{
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ if (!di->aligndesc_4k) {
+ if (direction == DMA_TX)
+ di->xmtptrbase = PHYSADDRLO(pa);
+ else
+ di->rcvptrbase = PHYSADDRLO(pa);
+ }
+
+ if ((di->ddoffsetlow == 0)
+ || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d64txregs->addrlow,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64txregs->addrhigh,
+ (PHYSADDRHI(pa) + di->ddoffsethigh));
+ } else {
+ W_REG(di->osh, &di->d64rxregs->addrlow,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64rxregs->addrhigh,
+ (PHYSADDRHI(pa) + di->ddoffsethigh));
+ }
+ } else {
+ /* DMA64 32bits address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+ ASSERT(PHYSADDRHI(pa) == 0);
+
+ /* shift the high bit(s) from pa to ae */
+ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
+ PCI32ADDR_HIGH_SHIFT;
+ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
+
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d64txregs->addrlow,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64txregs->addrhigh,
+ di->ddoffsethigh);
+ SET_REG(di->osh, &di->d64txregs->control,
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ } else {
+ W_REG(di->osh, &di->d64rxregs->addrlow,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64rxregs->addrhigh,
+ di->ddoffsethigh);
+ SET_REG(di->osh, &di->d64rxregs->control,
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ }
+ }
+
+ } else if (DMA32_ENAB(di)) {
+ ASSERT(PHYSADDRHI(pa) == 0);
+ if ((di->ddoffsetlow == 0)
+ || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
+ if (direction == DMA_TX)
+ W_REG(di->osh, &di->d32txregs->addr,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ else
+ W_REG(di->osh, &di->d32rxregs->addr,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ } else {
+ /* dma32 address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+
+ /* shift the high bit(s) from pa to ae */
+ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
+ PCI32ADDR_HIGH_SHIFT;
+ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
+
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d32txregs->addr,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ SET_REG(di->osh, &di->d32txregs->control, XC_AE,
+ ae << XC_AE_SHIFT);
+ } else {
+ W_REG(di->osh, &di->d32rxregs->addr,
+ (PHYSADDRLO(pa) + di->ddoffsetlow));
+ SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
+ ae << RC_AE_SHIFT);
+ }
+ }
+ } else
+ ASSERT(0);
+}
+
+static void _dma_fifoloopbackenable(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di))
+ OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
+ else if (DMA32_ENAB(di))
+ OR_REG(di->osh, &di->d32txregs->control, XC_LE);
+ else
+ ASSERT(0);
+}
+
+static void _dma_rxinit(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_rxinit\n", di->name));
+
+ if (di->nrxd == 0)
+ return;
+
+ di->rxin = di->rxout = 0;
+
+ /* clear rx descriptor ring */
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ BZERO_SM((void *)(uintptr) di->rxd64,
+ (di->nrxd * sizeof(dma64dd_t)));
+
+ /* A DMA engine without an alignment requirement requires the
+ * descriptor table to be initialized before enabling the engine
+ */
+ if (!di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+
+ _dma_rxenable(di);
+
+ if (di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+ } else if (DMA32_ENAB(di)) {
+ BZERO_SM((void *)(uintptr) di->rxd32,
+ (di->nrxd * sizeof(dma32dd_t)));
+ _dma_rxenable(di);
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+ } else
+ ASSERT(0);
+}
+
+static void _dma_rxenable(dma_info_t * di)
+{
+ uint dmactrlflags = di->hnddma.dmactrlflags;
+
+ DMA_TRACE(("%s: dma_rxenable\n", di->name));
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ uint32 control =
+ (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
+ D64_RC_RE;
+
+ if ((dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= D64_RC_PD;
+
+ if (dmactrlflags & DMA_CTRL_ROC)
+ control |= D64_RC_OC;
+
+ W_REG(di->osh, &di->d64rxregs->control,
+ ((di->rxoffset << D64_RC_RO_SHIFT) | control));
+ } else if (DMA32_ENAB(di)) {
+ uint32 control =
+ (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;
+
+ if ((dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= RC_PD;
+
+ if (dmactrlflags & DMA_CTRL_ROC)
+ control |= RC_OC;
+
+ W_REG(di->osh, &di->d32rxregs->control,
+ ((di->rxoffset << RC_RO_SHIFT) | control));
+ } else
+ ASSERT(0);
+}
+
+static void
+_dma_rx_param_get(dma_info_t * di, uint16 * rxoffset, uint16 * rxbufsize)
+{
+ /* the normal values fit into 16 bits */
+ *rxoffset = (uint16) di->rxoffset;
+ *rxbufsize = (uint16) di->rxbufsize;
+}
+
+/* !! rx entry routine
+ * Returns a pointer to the next received frame, or NULL if there are no more.
+ * If DMA_CTRL_RXMULTI is set, DMA scattering (multiple buffers) is supported
+ * as a packet chain; otherwise an oversized frame is treated as a giant
+ * packet and tossed.
+ * DMA scattering starts with a normal DMA header followed by the first
+ * buffer's data; once that buffer is full, the data continues in the next
+ * descriptor's buffer WITHOUT a DMA header.
+ */
+static void *BCMFASTPATH _dma_rx(dma_info_t * di)
+{
+ void *p, *head, *tail;
+ uint len;
+ uint pkt_len;
+ int resid = 0;
+
+ next_frame:
+ head = _dma_getnextrxp(di, FALSE);
+ if (head == NULL)
+ return (NULL);
+
+ len = ltoh16(*(uint16 *) (PKTDATA(head)));
+ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
+
+#if defined(__mips__)
+ if (!len) {
+ while (!(len = *(uint16 *) OSL_UNCACHED(PKTDATA(head))))
+ OSL_DELAY(1);
+
+ *(uint16 *) PKTDATA(head) = htol16((uint16) len);
+ }
+#endif /* defined(__mips__) */
+
+ /* set actual length */
+ pkt_len = MIN((di->rxoffset + len), di->rxbufsize);
+ PKTSETLEN(head, pkt_len);
+ resid = len - (di->rxbufsize - di->rxoffset);
+
+ /* check for single or multi-buffer rx */
+ if (resid > 0) {
+ tail = head;
+ while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) {
+ PKTSETNEXT(tail, p);
+ pkt_len = MIN(resid, (int)di->rxbufsize);
+ PKTSETLEN(p, pkt_len);
+
+ tail = p;
+ resid -= di->rxbufsize;
+ }
+
+#ifdef BCMDBG
+ if (resid > 0) {
+ uint cur;
+ ASSERT(p == NULL);
+ cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
+ B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
+ D64_RS0_CD_MASK) -
+ di->rcvptrbase) & D64_RS0_CD_MASK,
+ dma64dd_t) : B2I(R_REG(di->osh,
+ &di->d32rxregs->
+ status) & RS_CD_MASK,
+ dma32dd_t);
+ DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur));
+ }
+#endif /* BCMDBG */
+
+ if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
+ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
+ di->name, len));
+ PKTFREE(di->osh, head, FALSE);
+ di->hnddma.rxgiants++;
+ goto next_frame;
+ }
+ }
+
+ return (head);
+}
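+
+/* Hedged sketch of how a driver receive path typically consumes the entry
+ * point above through the public hnddma wrappers (wrapper names assumed from
+ * hnddma.h; the caller and process_frame() are hypothetical):
+ *
+ *	void wl_recv_sketch(hnddma_t *dmah)
+ *	{
+ *		void *p;
+ *
+ *		while ((p = dma_rx(dmah)) != NULL)
+ *			process_frame(p);
+ *
+ *		dma_rxfill(dmah);
+ *	}
+ *
+ * dma_rxfill() is called afterwards to repost buffers so the ring stays full.
+ */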
+
+/* post receive buffers
+ * Returns TRUE if the refill failed completely and the ring is empty; that
+ * stalls the rx DMA, so the caller may want to retry rxfill as soon as
+ * possible.  This is unlikely on a memory-rich NIC, but common on a
+ * memory-constrained dongle.
+ */
+static bool BCMFASTPATH _dma_rxfill(dma_info_t * di)
+{
+ void *p;
+ uint16 rxin, rxout;
+ uint32 flags = 0;
+ uint n;
+ uint i;
+ dmaaddr_t pa;
+ uint extra_offset = 0;
+ bool ring_empty;
+
+ ring_empty = FALSE;
+
+ /*
+ * Determine how many receive buffers we're lacking
+ * from the full complement, allocate, initialize,
+ * and post them, then update the chip rx lastdscr.
+ */
+
+ rxin = di->rxin;
+ rxout = di->rxout;
+
+ n = di->nrxpost - NRXDACTIVE(rxin, rxout);
+
+ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+
+ if (di->rxbufsize > BCMEXTRAHDROOM)
+ extra_offset = di->rxextrahdrroom;
+
+ for (i = 0; i < n; i++) {
+ /* di->rxbufsize does not include the extra headroom, so add it to
+ * the size to be allocated
+ */
+
+ p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
+
+ if (p == NULL) {
+ DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
+ di->name));
+ if (i == 0) {
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ if (dma64_rxidle(di)) {
+ DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
+ ring_empty = TRUE;
+ }
+ } else if (DMA32_ENAB(di)) {
+ if (dma32_rxidle(di)) {
+ DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
+ ring_empty = TRUE;
+ }
+ } else
+ ASSERT(0);
+ }
+ di->hnddma.rxnobuf++;
+ break;
+ }
+ /* reserve an extra headroom, if applicable */
+ if (extra_offset)
+ PKTPULL(p, extra_offset);
+
+ /* Do a cached write instead of uncached write since DMA_MAP
+ * will flush the cache.
+ */
+ *(uint32 *) (PKTDATA(p)) = 0;
+
+ if (DMASGLIST_ENAB)
+ bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
+
+ pa = DMA_MAP(di->osh, PKTDATA(p),
+ di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
+
+ ASSERT(ISALIGNED(PHYSADDRLO(pa), 4));
+
+ /* save the free packet pointer */
+ ASSERT(di->rxp[rxout] == NULL);
+ di->rxp[rxout] = p;
+
+ /* reset flags for each descriptor */
+ flags = 0;
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ if (rxout == (di->nrxd - 1))
+ flags = D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
+ di->rxbufsize);
+ } else if (DMA32_ENAB(di)) {
+ if (rxout == (di->nrxd - 1))
+ flags = CTRL_EOT;
+
+ ASSERT(PHYSADDRHI(pa) == 0);
+ dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
+ di->rxbufsize);
+ } else
+ ASSERT(0);
+ rxout = NEXTRXD(rxout);
+ }
+
+ di->rxout = rxout;
+
+ /* update the chip lastdscr pointer */
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ W_REG(di->osh, &di->d64rxregs->ptr,
+ di->rcvptrbase + I2B(rxout, dma64dd_t));
+ } else if (DMA32_ENAB(di)) {
+ W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
+ } else
+ ASSERT(0);
+
+ return ring_empty;
+}
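+
+/* Worked example of the refill count computed above, assuming NRXDACTIVE()
+ * is the modulo-ring distance between rxin and rxout (defined elsewhere):
+ * with di->nrxpost == 64, rxin == 10 and rxout == 40 there are 30 buffers
+ * already posted, so the loop allocates and posts n == 34 fresh buffers and
+ * then advances the chip lastdscr pointer to the new rxout.
+ */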
+
+/* like getnexttxp but no reclaim */
+static void *_dma_peeknexttxp(dma_info_t * di)
+{
+ uint end, i;
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ end =
+ B2I(((R_REG(di->osh, &di->d64txregs->status0) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
+ dma64dd_t);
+ } else if (DMA32_ENAB(di)) {
+ end =
+ B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
+ dma32dd_t);
+ } else
+ ASSERT(0);
+
+ for (i = di->txin; i != end; i = NEXTTXD(i))
+ if (di->txp[i])
+ return (di->txp[i]);
+
+ return (NULL);
+}
+
+/* like getnextrxp but does not take the packet off the ring */
+static void *_dma_peeknextrxp(dma_info_t * di)
+{
+ uint end, i;
+
+ if (di->nrxd == 0)
+ return (NULL);
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ end =
+ B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
+ D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
+ dma64dd_t);
+ } else if (DMA32_ENAB(di)) {
+ end =
+ B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
+ dma32dd_t);
+ } else
+ ASSERT(0);
+
+ for (i = di->rxin; i != end; i = NEXTRXD(i))
+ if (di->rxp[i])
+ return (di->rxp[i]);
+
+ return (NULL);
+}
+
+static void _dma_rxreclaim(dma_info_t * di)
+{
+ void *p;
+
+ /* "unused local" warning suppression for OSLs that
+ * define PKTFREE() without using the di->osh arg
+ */
+ di = di;
+
+ DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+
+ while ((p = _dma_getnextrxp(di, TRUE)))
+ PKTFREE(di->osh, p, FALSE);
+}
+
+static void *BCMFASTPATH _dma_getnextrxp(dma_info_t * di, bool forceall)
+{
+ if (di->nrxd == 0)
+ return (NULL);
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ return dma64_getnextrxp(di, forceall);
+ } else if (DMA32_ENAB(di)) {
+ return dma32_getnextrxp(di, forceall);
+ } else
+ ASSERT(0);
+
+ return (NULL);
+}
+
+static void _dma_txblock(dma_info_t * di)
+{
+ di->hnddma.txavail = 0;
+}
+
+static void _dma_txunblock(dma_info_t * di)
+{
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+}
+
+static uint _dma_txactive(dma_info_t * di)
+{
+ return NTXDACTIVE(di->txin, di->txout);
+}
+
+static uint _dma_txpending(dma_info_t * di)
+{
+ uint curr;
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ curr =
+ B2I(((R_REG(di->osh, &di->d64txregs->status0) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
+ dma64dd_t);
+ } else if (DMA32_ENAB(di)) {
+ curr =
+ B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
+ dma32dd_t);
+ } else
+ ASSERT(0);
+
+ return NTXDACTIVE(curr, di->txout);
+}
+
+static uint _dma_txcommitted(dma_info_t * di)
+{
+ uint ptr;
+ uint txin = di->txin;
+
+ if (txin == di->txout)
+ return 0;
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
+ } else if (DMA32_ENAB(di)) {
+ ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
+ } else
+ ASSERT(0);
+
+ return NTXDACTIVE(di->txin, ptr);
+}
+
+static uint _dma_rxactive(dma_info_t * di)
+{
+ return NRXDACTIVE(di->rxin, di->rxout);
+}
+
+static void _dma_counterreset(dma_info_t * di)
+{
+ /* reset all software counter */
+ di->hnddma.rxgiants = 0;
+ di->hnddma.rxnobuf = 0;
+ di->hnddma.txnobuf = 0;
+}
+
+static uint _dma_ctrlflags(dma_info_t * di, uint mask, uint flags)
+{
+ uint dmactrlflags;
+
+ if (di == NULL) {
+ DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+ return (0);
+ }
+
+ dmactrlflags = di->hnddma.dmactrlflags;
+
+ ASSERT((flags & ~mask) == 0);
+
+ dmactrlflags &= ~mask;
+ dmactrlflags |= flags;
+
+ /* If trying to enable parity, check if parity is actually supported */
+ if (dmactrlflags & DMA_CTRL_PEN) {
+ uint32 control;
+
+ if (DMA64_ENAB(di) && DMA64_MODE(di)) {
+ control = R_REG(di->osh, &di->d64txregs->control);
+ W_REG(di->osh, &di->d64txregs->control,
+ control | D64_XC_PD);
+ if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
+ /* We *can* disable it so it is supported,
+ * restore control register
+ */
+ W_REG(di->osh, &di->d64txregs->control,
+ control);
+ } else {
+ /* Not supported, don't allow it to be enabled */
+ dmactrlflags &= ~DMA_CTRL_PEN;
+ }
+ } else if (DMA32_ENAB(di)) {
+ control = R_REG(di->osh, &di->d32txregs->control);
+ W_REG(di->osh, &di->d32txregs->control,
+ control | XC_PD);
+ if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
+ W_REG(di->osh, &di->d32txregs->control,
+ control);
+ } else {
+ /* Not supported, don't allow it to be enabled */
+ dmactrlflags &= ~DMA_CTRL_PEN;
+ }
+ } else
+ ASSERT(0);
+ }
+
+ di->hnddma.dmactrlflags = dmactrlflags;
+
+ return (dmactrlflags);
+}
+
+/* get the address of the var in order to change later */
+static uintptr _dma_getvar(dma_info_t * di, const char *name)
+{
+ if (!strcmp(name, "&txavail"))
+ return ((uintptr) & (di->hnddma.txavail));
+ else {
+ ASSERT(0);
+ }
+ return (0);
+}
+
+void dma_txpioloopback(osl_t * osh, dma32regs_t * regs)
+{
+ OR_REG(osh, &regs->control, XC_LE);
+}
+
+static
+uint8 dma_align_sizetobits(uint size)
+{
+ uint8 bitpos = 0;
+ ASSERT(size);
+ ASSERT(!(size & (size - 1)));
+ while (size >>= 1) {
+ bitpos++;
+ }
+ return (bitpos);
+}
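+
+/* Example: for a power-of-two size such as 4096, dma_align_sizetobits()
+ * returns 12, i.e. (1 << 12) == 4096.  The ASSERTs above reject zero and
+ * non-power-of-two sizes.
+ */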
+
+/* This function ensures that the DMA descriptor ring does not get allocated
+ * across a page boundary.  If the first allocation crosses the boundary, it
+ * is freed and the allocation is retried at a descriptor-ring-size-aligned
+ * location, which guarantees the ring cannot cross the boundary.
+ */
+static void *dma_ringalloc(osl_t * osh, uint32 boundary, uint size,
+ uint16 * alignbits, uint * alloced,
+ dmaaddr_t * descpa, osldma_t ** dmah)
+{
+ void *va;
+ uint32 desc_strtaddr;
+ uint32 alignbytes = 1 << *alignbits;
+
+ if (NULL ==
+ (va =
+ DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
+ dmah)))
+ return NULL;
+
+ desc_strtaddr = (uint32) ROUNDUP((uintptr) va, alignbytes);
+ if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
+ & boundary)) {
+ *alignbits = dma_align_sizetobits(size);
+ DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
+ va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
+ descpa, dmah);
+ }
+ return va;
+}
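+
+/* Worked example of the boundary test above, assuming boundary == 0x1000:
+ * with a rounded-up ring start of 0x1000f00 and size == 0x400, the last
+ * descriptor byte sits at 0x10012ff; bit 12 differs between the two
+ * addresses, so the ring straddles the 4 KB boundary at 0x1001000.  The
+ * buffer is then freed and re-allocated with *alignbits = log2(size) == 10,
+ * which forces a size-aligned start address that cannot cross the boundary.
+ */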
+
+/* 32-bit DMA functions */
+
+static void dma32_txinit(dma_info_t * di)
+{
+ uint32 control = XC_XE;
+
+ DMA_TRACE(("%s: dma_txinit\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ di->txin = di->txout = 0;
+ di->hnddma.txavail = di->ntxd - 1;
+
+ /* clear tx descriptor ring */
+ BZERO_SM((void *)(uintptr) di->txd32, (di->ntxd * sizeof(dma32dd_t)));
+
+ if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= XC_PD;
+ W_REG(di->osh, &di->d32txregs->control, control);
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+}
+
+static bool dma32_txenabled(dma_info_t * di)
+{
+ uint32 xc;
+
+ /* If the chip is dead, it is not enabled :-) */
+ xc = R_REG(di->osh, &di->d32txregs->control);
+ return ((xc != 0xffffffff) && (xc & XC_XE));
+}
+
+static void dma32_txsuspend(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ OR_REG(di->osh, &di->d32txregs->control, XC_SE);
+}
+
+static void dma32_txresume(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_txresume\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
+}
+
+static bool dma32_txsuspended(dma_info_t * di)
+{
+ return (di->ntxd == 0)
+ || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
+}
+
+static void dma32_txreclaim(dma_info_t * di, txd_range_t range)
+{
+ void *p;
+
+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
+ (range == HNDDMA_RANGE_ALL) ? "all" :
+ ((range ==
+ HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transfered")));
+
+ if (di->txin == di->txout)
+ return;
+
+ while ((p = dma32_getnexttxp(di, range)))
+ PKTFREE(di->osh, p, TRUE);
+}
+
+static bool dma32_txstopped(dma_info_t * di)
+{
+ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
+ XS_XS_STOPPED);
+}
+
+static bool dma32_rxstopped(dma_info_t * di)
+{
+ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
+ RS_RS_STOPPED);
+}
+
+static bool dma32_alloc(dma_info_t * di, uint direction)
+{
+ uint size;
+ uint ddlen;
+ void *va;
+ uint alloced;
+ uint16 align;
+ uint16 align_bits;
+
+ ddlen = sizeof(dma32dd_t);
+
+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
+
+ alloced = 0;
+ align_bits = di->dmadesc_align;
+ align = (1 << align_bits);
+
+ if (direction == DMA_TX) {
+ if ((va =
+ dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
+ &alloced, &di->txdpaorig,
+ &di->tx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
+ return FALSE;
+ }
+
+ PHYSADDRHISET(di->txdpa, 0);
+ ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
+ di->txd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
+ di->txdalign =
+ (uint) ((int8 *) (uintptr) di->txd32 - (int8 *) va);
+
+ PHYSADDRLOSET(di->txdpa,
+ PHYSADDRLO(di->txdpaorig) + di->txdalign);
+ /* Make sure that alignment didn't overflow */
+ ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
+
+ di->txdalloc = alloced;
+ ASSERT(ISALIGNED((uintptr) di->txd32, align));
+ } else {
+ if ((va =
+ dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
+ &alloced, &di->rxdpaorig,
+ &di->rx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
+ return FALSE;
+ }
+
+ PHYSADDRHISET(di->rxdpa, 0);
+ ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
+ di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr) va, align);
+ di->rxdalign =
+ (uint) ((int8 *) (uintptr) di->rxd32 - (int8 *) va);
+
+ PHYSADDRLOSET(di->rxdpa,
+ PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
+ /* Make sure that alignment didn't overflow */
+ ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
+ di->rxdalloc = alloced;
+ ASSERT(ISALIGNED((uintptr) di->rxd32, align));
+ }
+
+ return TRUE;
+}
+
+static bool dma32_txreset(dma_info_t * di)
+{
+ uint32 status;
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ /* suspend tx DMA first */
+ W_REG(di->osh, &di->d32txregs->control, XC_SE);
+ SPINWAIT(((status =
+ (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
+ != XS_XS_DISABLED) && (status != XS_XS_IDLE)
+ && (status != XS_XS_STOPPED), (10000));
+
+ W_REG(di->osh, &di->d32txregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh,
+ &di->d32txregs->status) & XS_XS_MASK)) !=
+ XS_XS_DISABLED), 10000);
+
+ /* wait for the last transaction to complete */
+ OSL_DELAY(300);
+
+ return (status == XS_XS_DISABLED);
+}
+
+static bool dma32_rxidle(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_rxidle\n", di->name));
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
+ R_REG(di->osh, &di->d32rxregs->ptr));
+}
+
+static bool dma32_rxreset(dma_info_t * di)
+{
+ uint32 status;
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ W_REG(di->osh, &di->d32rxregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh,
+ &di->d32rxregs->status) & RS_RS_MASK)) !=
+ RS_RS_DISABLED), 10000);
+
+ return (status == RS_RS_DISABLED);
+}
+
+static bool dma32_rxenabled(dma_info_t * di)
+{
+ uint32 rc;
+
+ rc = R_REG(di->osh, &di->d32rxregs->control);
+ return ((rc != 0xffffffff) && (rc & RC_RE));
+}
+
+static bool dma32_txsuspendedidle(dma_info_t * di)
+{
+ if (di->ntxd == 0)
+ return TRUE;
+
+ if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
+ return 0;
+
+ if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
+ return 0;
+
+ OSL_DELAY(2);
+ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
+ XS_XS_IDLE);
+}
+
+/* !! tx entry routine
+ * Supports full 32-bit DMA engine buffer addressing, so DMA buffers can
+ * cross 4 KB page boundaries.
+ *
+ * WARNING: the caller must check the return value for errors.
+ * A failure here (tossed frames) could be fatal and cause many subsequent
+ * hard-to-debug problems.
+ */
+static int dma32_txfast(dma_info_t * di, void *p0, bool commit)
+{
+ void *p, *next;
+ uchar *data;
+ uint len;
+ uint16 txout;
+ uint32 flags = 0;
+ dmaaddr_t pa;
+
+ DMA_TRACE(("%s: dma_txfast\n", di->name));
+
+ txout = di->txout;
+
+ /*
+ * Walk the chain of packet buffers
+ * allocating and initializing transmit descriptor entries.
+ */
+ for (p = p0; p; p = next) {
+ uint nsegs, j;
+ hnddma_seg_map_t *map;
+
+ data = PKTDATA(p);
+ len = PKTLEN(p);
+#ifdef BCM_DMAPAD
+ len += PKTDMAPAD(di->osh, p);
+#endif
+ next = PKTNEXT(p);
+
+ /* return nonzero if out of tx descriptors */
+ if (NEXTTXD(txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ continue;
+
+ if (DMASGLIST_ENAB)
+ bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+
+ /* get physical address of buffer start */
+ pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
+ &di->txp_dmah[txout]);
+
+ if (DMASGLIST_ENAB) {
+ map = &di->txp_dmah[txout];
+
+ /* See if all the segments can be accounted for */
+ if (map->nsegs >
+ (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
+ 1))
+ goto outoftxd;
+
+ nsegs = map->nsegs;
+ } else
+ nsegs = 1;
+
+ for (j = 1; j <= nsegs; j++) {
+ flags = 0;
+ if (p == p0 && j == 1)
+ flags |= CTRL_SOF;
+
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
+ * end of segment list is reached.
+ */
+ if ((!DMASGLIST_ENAB && next == NULL) ||
+ (DMASGLIST_ENAB && j == nsegs))
+ flags |= (CTRL_IOC | CTRL_EOF);
+ if (txout == (di->ntxd - 1))
+ flags |= CTRL_EOT;
+
+ if (DMASGLIST_ENAB) {
+ len = map->segs[j - 1].length;
+ pa = map->segs[j - 1].addr;
+ }
+ ASSERT(PHYSADDRHI(pa) == 0);
+
+ dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
+ ASSERT(di->txp[txout] == NULL);
+
+ txout = NEXTTXD(txout);
+ }
+
+ /* See above. No need to loop over individual buffers */
+ if (DMASGLIST_ENAB)
+ break;
+ }
+
+ /* if last txd eof not set, fix it */
+ if (!(flags & CTRL_EOF))
+ W_SM(&di->txd32[PREVTXD(txout)].ctrl,
+ BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
+
+ /* save the packet */
+ di->txp[PREVTXD(txout)] = p0;
+
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit)
+ W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (0);
+
+ outoftxd:
+ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
+ PKTFREE(di->osh, p0, TRUE);
+ di->hnddma.txavail = 0;
+ di->hnddma.txnobuf++;
+ return (-1);
+}
+
+/*
+ * Reclaim next completed txd (txds if using chained buffers) in the range
+ * specified and return the associated packet.
+ * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
+ * transmitted as noted by the hardware "CurrDescr" pointer.
+ * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
+ * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
+ * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
+ * return the associated packet regardless of the hardware pointer values.
+ */
+static void *dma32_getnexttxp(dma_info_t * di, txd_range_t range)
+{
+ uint16 start, end, i;
+ uint16 active_desc;
+ void *txp;
+
+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
+ (range == HNDDMA_RANGE_ALL) ? "all" :
+ ((range ==
+ HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transfered")));
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ txp = NULL;
+
+ start = di->txin;
+ if (range == HNDDMA_RANGE_ALL)
+ end = di->txout;
+ else {
+ dma32regs_t *dregs = di->d32txregs;
+
+ end =
+ (uint16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
+ dma32dd_t);
+
+ if (range == HNDDMA_RANGE_TRANSFERED) {
+ active_desc =
+ (uint16) ((R_REG(di->osh, &dregs->status) &
+ XS_AD_MASK) >> XS_AD_SHIFT);
+ active_desc = (uint16) B2I(active_desc, dma32dd_t);
+ if (end != active_desc)
+ end = PREVTXD(active_desc);
+ }
+ }
+
+ if ((start == 0) && (end > di->txout))
+ goto bogus;
+
+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
+ dmaaddr_t pa;
+ hnddma_seg_map_t *map = NULL;
+ uint size, j, nsegs;
+
+ PHYSADDRLOSET(pa,
+ (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
+ di->dataoffsetlow));
+ PHYSADDRHISET(pa, 0);
+
+ if (DMASGLIST_ENAB) {
+ map = &di->txp_dmah[i];
+ size = map->origsize;
+ nsegs = map->nsegs;
+ } else {
+ size =
+ (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
+ CTRL_BC_MASK);
+ nsegs = 1;
+ }
+
+ for (j = nsegs; j > 0; j--) {
+ W_SM(&di->txd32[i].addr, 0xdeadbeef);
+
+ txp = di->txp[i];
+ di->txp[i] = NULL;
+ if (j > 1)
+ i = NEXTTXD(i);
+ }
+
+ DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
+ }
+
+ di->txin = i;
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (txp);
+
+ bogus:
+ DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
+ return (NULL);
+}
+
+static void *dma32_getnextrxp(dma_info_t * di, bool forceall)
+{
+ uint i, curr;
+ void *rxp;
+ dmaaddr_t pa;
+ /* if forcing, dma engine must be disabled */
+ ASSERT(!forceall || !dma32_rxenabled(di));
+
+ i = di->rxin;
+
+ /* return if no packets posted */
+ if (i == di->rxout)
+ return (NULL);
+
+ curr =
+ B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
+
+ /* ignore curr if forceall */
+ if (!forceall && (i == curr))
+ return (NULL);
+
+ /* get the packet pointer that corresponds to the rx descriptor */
+ rxp = di->rxp[i];
+ ASSERT(rxp);
+ di->rxp[i] = NULL;
+
+ PHYSADDRLOSET(pa,
+ (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
+ di->dataoffsetlow));
+ PHYSADDRHISET(pa, 0);
+
+ /* clear this packet from the descriptor ring */
+ DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
+
+ W_SM(&di->rxd32[i].addr, 0xdeadbeef);
+
+ di->rxin = NEXTRXD(i);
+
+ return (rxp);
+}
+
+/*
+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
+ */
+static void dma32_txrotate(dma_info_t * di)
+{
+ uint16 ad;
+ uint nactive;
+ uint rot;
+ uint16 old, new;
+ uint32 w;
+ uint16 first, last;
+
+ ASSERT(dma32_txsuspendedidle(di));
+
+ nactive = _dma_txactive(di);
+ ad = (uint16) (B2I
+ (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
+ >> XS_AD_SHIFT), dma32dd_t));
+ rot = TXD(ad - di->txin);
+
+ ASSERT(rot < di->ntxd);
+
+ /* full-ring case is a lot harder - don't worry about this */
+ if (rot >= (di->ntxd - nactive)) {
+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
+ return;
+ }
+
+ first = di->txin;
+ last = PREVTXD(di->txout);
+
+ /* move entries starting at last and moving backwards to first */
+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
+ new = TXD(old + rot);
+
+ /*
+ * Move the tx dma descriptor.
+ * EOT is set only in the last entry in the ring.
+ */
+ w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
+ if (new == (di->ntxd - 1))
+ w |= CTRL_EOT;
+ W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
+ W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
+
+ /* zap the old tx dma descriptor address field */
+ W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
+
+ /* move the corresponding txp[] entry */
+ ASSERT(di->txp[new] == NULL);
+ di->txp[new] = di->txp[old];
+
+ /* Move the segment map as well */
+ if (DMASGLIST_ENAB) {
+ bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
+ sizeof(hnddma_seg_map_t));
+ bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+ }
+
+ di->txp[old] = NULL;
+ }
+
+ /* update txin and txout */
+ di->txin = ad;
+ di->txout = TXD(di->txout + rot);
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ /* kick the chip */
+ W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
+}
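+
+/* Worked example of the rotation above, assuming ntxd == 64, di->txin == 10
+ * and the hardware ActiveDescr ad == 14: rot == TXD(14 - 10) == 4, so every
+ * posted descriptor (and its txp[] entry) is copied forward by 4 slots, EOT
+ * is re-placed on the last ring entry if needed, and txin/txout both advance
+ * by 4 before the chip is kicked at the new txout.
+ */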
+
+/* 64-bit DMA functions */
+
+static void dma64_txinit(dma_info_t * di)
+{
+ uint32 control = D64_XC_XE;
+
+ DMA_TRACE(("%s: dma_txinit\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ di->txin = di->txout = 0;
+ di->hnddma.txavail = di->ntxd - 1;
+
+ /* clear tx descriptor ring */
+ BZERO_SM((void *)(uintptr) di->txd64, (di->ntxd * sizeof(dma64dd_t)));
+
+ /* A DMA engine without an alignment requirement requires the
+ * descriptor table to be initialized before enabling the engine
+ */
+ if (!di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+
+ if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
+ control |= D64_XC_PD;
+ OR_REG(di->osh, &di->d64txregs->control, control);
+
+ /* A DMA engine with an alignment requirement requires the
+ * descriptor table to be initialized before enabling the engine
+ */
+ if (di->aligndesc_4k)
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+}
+
+static bool dma64_txenabled(dma_info_t * di)
+{
+ uint32 xc;
+
+ /* If the chip is dead, it is not enabled :-) */
+ xc = R_REG(di->osh, &di->d64txregs->control);
+ return ((xc != 0xffffffff) && (xc & D64_XC_XE));
+}
+
+static void dma64_txsuspend(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
+}
+
+static void dma64_txresume(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_txresume\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
+}
+
+static bool dma64_txsuspended(dma_info_t * di)
+{
+ return (di->ntxd == 0) ||
+ ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
+ D64_XC_SE);
+}
+
+static void BCMFASTPATH dma64_txreclaim(dma_info_t * di, txd_range_t range)
+{
+ void *p;
+
+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
+ (range == HNDDMA_RANGE_ALL) ? "all" :
+ ((range ==
+ HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transfered")));
+
+ if (di->txin == di->txout)
+ return;
+
+ while ((p = dma64_getnexttxp(di, range))) {
+ /* For unframed data, we don't have any packets to free */
+ if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
+ PKTFREE(di->osh, p, TRUE);
+ }
+}
+
+static bool dma64_txstopped(dma_info_t * di)
+{
+ return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
+ D64_XS0_XS_STOPPED);
+}
+
+static bool dma64_rxstopped(dma_info_t * di)
+{
+ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
+ D64_RS0_RS_STOPPED);
+}
+
+static bool dma64_alloc(dma_info_t * di, uint direction)
+{
+ uint16 size;
+ uint ddlen;
+ void *va;
+ uint alloced = 0;
+ uint16 align;
+ uint16 align_bits;
+
+ ddlen = sizeof(dma64dd_t);
+
+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
+ align_bits = di->dmadesc_align;
+ align = (1 << align_bits);
+
+ if (direction == DMA_TX) {
+ if ((va =
+ dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
+ &alloced, &di->txdpaorig,
+ &di->tx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
+ return FALSE;
+ }
+ align = (1 << align_bits);
+ di->txd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
+ di->txdalign =
+ (uint) ((int8 *) (uintptr) di->txd64 - (int8 *) va);
+ PHYSADDRLOSET(di->txdpa,
+ PHYSADDRLO(di->txdpaorig) + di->txdalign);
+ /* Make sure that alignment didn't overflow */
+ ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
+
+ PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
+ di->txdalloc = alloced;
+ ASSERT(ISALIGNED((uintptr) di->txd64, align));
+ } else {
+ if ((va =
+ dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
+ &alloced, &di->rxdpaorig,
+ &di->rx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
+ return FALSE;
+ }
+ align = (1 << align_bits);
+ di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr) va, align);
+ di->rxdalign =
+ (uint) ((int8 *) (uintptr) di->rxd64 - (int8 *) va);
+ PHYSADDRLOSET(di->rxdpa,
+ PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
+ /* Make sure that alignment didn't overflow */
+ ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
+
+ PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
+ di->rxdalloc = alloced;
+ ASSERT(ISALIGNED((uintptr) di->rxd64, align));
+ }
+
+ return TRUE;
+}
+
+static bool dma64_txreset(dma_info_t * di)
+{
+ uint32 status;
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ /* suspend tx DMA first */
+ W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
+ SPINWAIT(((status =
+ (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
+ != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
+ && (status != D64_XS0_XS_STOPPED), 10000);
+
+ W_REG(di->osh, &di->d64txregs->control, 0);
+ SPINWAIT(((status =
+ (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
+ != D64_XS0_XS_DISABLED), 10000);
+
+ /* wait for the last transaction to complete */
+ OSL_DELAY(300);
+
+ return (status == D64_XS0_XS_DISABLED);
+}
+
+static bool dma64_rxidle(dma_info_t * di)
+{
+ DMA_TRACE(("%s: dma_rxidle\n", di->name));
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
+ (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+}
+
+static bool dma64_rxreset(dma_info_t * di)
+{
+ uint32 status;
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ W_REG(di->osh, &di->d64rxregs->control, 0);
+ SPINWAIT(((status =
+ (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
+ != D64_RS0_RS_DISABLED), 10000);
+
+ return (status == D64_RS0_RS_DISABLED);
+}
+
+static bool dma64_rxenabled(dma_info_t * di)
+{
+ uint32 rc;
+
+ rc = R_REG(di->osh, &di->d64rxregs->control);
+ return ((rc != 0xffffffff) && (rc & D64_RC_RE));
+}
+
+static bool dma64_txsuspendedidle(dma_info_t * di)
+{
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
+ return 0;
+
+ if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
+ D64_XS0_XS_IDLE)
+ return 1;
+
+ return 0;
+}
+
+/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
+ * We return a pointer to the beginning of the DATA buffer of the current descriptor.
+ * If DMA is idle, we return NULL.
+ */
+static void *dma64_getpos(dma_info_t * di, bool direction)
+{
+ void *va;
+ bool idle;
+ uint32 cd_offset;
+
+ if (direction == DMA_TX) {
+ cd_offset =
+ R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
+ idle = !NTXDACTIVE(di->txin, di->txout);
+ va = di->txp[B2I(cd_offset, dma64dd_t)];
+ } else {
+ cd_offset =
+ R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK;
+ idle = !NRXDACTIVE(di->rxin, di->rxout);
+ va = di->rxp[B2I(cd_offset, dma64dd_t)];
+ }
+
+ /* If DMA is IDLE, return NULL */
+ if (idle) {
+ DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
+ va = NULL;
+ }
+
+ return va;
+}
+
+/* TX of unframed data
+ *
+ * Adds a DMA ring descriptor for the data pointed to by "buf".
+ * This is for DMA of a plain buffer of data, unlike the other hnddma TX
+ * functions that take a pointer to a "packet".
+ * Each call results in a single descriptor being added for "len" bytes of
+ * data starting at "buf"; it doesn't handle chained buffers.
+ */
+static int dma64_txunframed(dma_info_t * di, void *buf, uint len, bool commit)
+{
+ uint16 txout;
+ uint32 flags = 0;
+ dmaaddr_t pa; /* phys addr */
+
+ txout = di->txout;
+
+ /* return nonzero if out of tx descriptors */
+ if (NEXTTXD(txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ return 0;
+
+ pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);
+
+ flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
+
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ ASSERT(di->txp[txout] == NULL);
+
+ /* save the buffer pointer - used by dma_getpos */
+ di->txp[txout] = buf;
+
+ txout = NEXTTXD(txout);
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit) {
+ W_REG(di->osh, &di->d64txregs->ptr,
+ di->xmtptrbase + I2B(txout, dma64dd_t));
+ }
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (0);
+
+ outoftxd:
+ DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
+ di->hnddma.txavail = 0;
+ di->hnddma.txnobuf++;
+ return (-1);
+}
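+
+/* Hedged usage sketch for the unframed TX path: a caller posts raw bytes and
+ * then polls dma64_getpos() (through its hnddma wrapper) to see how far the
+ * DMA has progressed.  The wrapper names below are assumptions, not verified
+ * against hnddma.h:
+ *
+ *	if (dma_txunframed(dmah, buf, len, TRUE) == 0) {
+ *		void *pos = dma_getpos(dmah, DMA_TX);
+ *		...
+ *	}
+ *
+ * A NULL "pos" means the engine has gone idle; otherwise it points at the
+ * buffer saved for the descriptor the DMA is currently working on.
+ */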
+
+/* !! tx entry routine
+ * WARNING: call must check the return value for error.
+ * the error(toss frames) could be fatal and cause many subsequent hard to debug problems
+ */
+static int BCMFASTPATH dma64_txfast(dma_info_t * di, void *p0, bool commit)
+{
+ void *p, *next;
+ uchar *data;
+ uint len;
+ uint16 txout;
+ uint32 flags = 0;
+ dmaaddr_t pa;
+
+ DMA_TRACE(("%s: dma_txfast\n", di->name));
+
+ txout = di->txout;
+
+ /*
+ * Walk the chain of packet buffers
+ * allocating and initializing transmit descriptor entries.
+ */
+ for (p = p0; p; p = next) {
+ uint nsegs, j;
+ hnddma_seg_map_t *map;
+
+ data = PKTDATA(p);
+ len = PKTLEN(p);
+#ifdef BCM_DMAPAD
+ len += PKTDMAPAD(di->osh, p);
+#endif /* BCM_DMAPAD */
+ next = PKTNEXT(p);
+
+ /* return nonzero if out of tx descriptors */
+ if (NEXTTXD(txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ continue;
+
+ /* get physical address of buffer start */
+ if (DMASGLIST_ENAB)
+ bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+
+ pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
+ &di->txp_dmah[txout]);
+
+ if (DMASGLIST_ENAB) {
+ map = &di->txp_dmah[txout];
+
+ /* See if all the segments can be accounted for */
+ if (map->nsegs >
+ (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
+ 1))
+ goto outoftxd;
+
+ nsegs = map->nsegs;
+ } else
+ nsegs = 1;
+
+ for (j = 1; j <= nsegs; j++) {
+ flags = 0;
+ if (p == p0 && j == 1)
+ flags |= D64_CTRL1_SOF;
+
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
+ * end of segment list is reached.
+ */
+ if ((!DMASGLIST_ENAB && next == NULL) ||
+ (DMASGLIST_ENAB && j == nsegs))
+ flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
+
+ if (DMASGLIST_ENAB) {
+ len = map->segs[j - 1].length;
+ pa = map->segs[j - 1].addr;
+ }
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ ASSERT(di->txp[txout] == NULL);
+
+ txout = NEXTTXD(txout);
+ }
+
+ /* See above. No need to loop over individual buffers */
+ if (DMASGLIST_ENAB)
+ break;
+ }
+
+ /* if last txd eof not set, fix it */
+ if (!(flags & D64_CTRL1_EOF))
+ W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
+ BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
+
+ /* save the packet */
+ di->txp[PREVTXD(txout)] = p0;
+
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit)
+ W_REG(di->osh, &di->d64txregs->ptr,
+ di->xmtptrbase + I2B(txout, dma64dd_t));
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (0);
+
+ outoftxd:
+ DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
+ PKTFREE(di->osh, p0, TRUE);
+ di->hnddma.txavail = 0;
+ di->hnddma.txnobuf++;
+ return (-1);
+}
+
+/*
+ * Reclaim next completed txd (txds if using chained buffers) in the range
+ * specified and return the associated packet.
+ * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
+ * transmitted as noted by the hardware "CurrDescr" pointer.
+ * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
+ * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
+ * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
+ * return the associated packet regardless of the hardware pointer values.
+ */
+static void *BCMFASTPATH dma64_getnexttxp(dma_info_t * di, txd_range_t range)
+{
+ uint16 start, end, i;
+ uint16 active_desc;
+ void *txp;
+
+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
+ (range == HNDDMA_RANGE_ALL) ? "all" :
+ ((range ==
+ HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ "transfered")));
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ txp = NULL;
+
+ start = di->txin;
+ if (range == HNDDMA_RANGE_ALL)
+ end = di->txout;
+ else {
+ dma64regs_t *dregs = di->d64txregs;
+
+ end =
+ (uint16) (B2I
+ (((R_REG(di->osh, &dregs->status0) &
+ D64_XS0_CD_MASK) -
+ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
+
+ if (range == HNDDMA_RANGE_TRANSFERED) {
+ active_desc =
+ (uint16) (R_REG(di->osh, &dregs->status1) &
+ D64_XS1_AD_MASK);
+ active_desc =
+ (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
+ active_desc = B2I(active_desc, dma64dd_t);
+ if (end != active_desc)
+ end = PREVTXD(active_desc);
+ }
+ }
+
+ if ((start == 0) && (end > di->txout))
+ goto bogus;
+
+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
+ dmaaddr_t pa;
+ hnddma_seg_map_t *map = NULL;
+ uint size, j, nsegs;
+
+ PHYSADDRLOSET(pa,
+ (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
+ di->dataoffsetlow));
+ PHYSADDRHISET(pa,
+ (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
+ di->dataoffsethigh));
+
+ if (DMASGLIST_ENAB) {
+ map = &di->txp_dmah[i];
+ size = map->origsize;
+ nsegs = map->nsegs;
+ } else {
+ size =
+ (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
+ D64_CTRL2_BC_MASK);
+ nsegs = 1;
+ }
+
+ for (j = nsegs; j > 0; j--) {
+ W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
+ W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
+
+ txp = di->txp[i];
+ di->txp[i] = NULL;
+ if (j > 1)
+ i = NEXTTXD(i);
+ }
+
+ DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
+ }
+
+ di->txin = i;
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (txp);
+
+ bogus:
+ DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
+ return (NULL);
+}
+
+static void *BCMFASTPATH dma64_getnextrxp(dma_info_t * di, bool forceall)
+{
+ uint i, curr;
+ void *rxp;
+ dmaaddr_t pa;
+
+ /* if forcing, dma engine must be disabled */
+ ASSERT(!forceall || !dma64_rxenabled(di));
+
+ i = di->rxin;
+
+ /* return if no packets posted */
+ if (i == di->rxout)
+ return (NULL);
+
+ curr =
+ B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
+
+ /* ignore curr if forceall */
+ if (!forceall && (i == curr))
+ return (NULL);
+
+ /* get the packet pointer that corresponds to the rx descriptor */
+ rxp = di->rxp[i];
+ ASSERT(rxp);
+ di->rxp[i] = NULL;
+
+ PHYSADDRLOSET(pa,
+ (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
+ di->dataoffsetlow));
+ PHYSADDRHISET(pa,
+ (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
+ di->dataoffsethigh));
+
+ /* clear this packet from the descriptor ring */
+ DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
+
+ W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
+ W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
+
+ di->rxin = NEXTRXD(i);
+
+ return (rxp);
+}
+
+static bool _dma64_addrext(osl_t * osh, dma64regs_t * dma64regs)
+{
+ uint32 w;
+ OR_REG(osh, &dma64regs->control, D64_XC_AE);
+ w = R_REG(osh, &dma64regs->control);
+ AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
+ return ((w & D64_XC_AE) == D64_XC_AE);
+}
+
+/*
+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
+ */
+static void dma64_txrotate(dma_info_t * di)
+{
+ uint16 ad;
+ uint nactive;
+ uint rot;
+ uint16 old, new;
+ uint32 w;
+ uint16 first, last;
+
+ ASSERT(dma64_txsuspendedidle(di));
+
+ nactive = _dma_txactive(di);
+ ad = (uint16) (B2I
+ ((((R_REG(di->osh, &di->d64txregs->status1) &
+ D64_XS1_AD_MASK)
+ - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
+ rot = TXD(ad - di->txin);
+
+ ASSERT(rot < di->ntxd);
+
+ /* full-ring case is a lot harder - don't worry about this */
+ if (rot >= (di->ntxd - nactive)) {
+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
+ return;
+ }
+
+ first = di->txin;
+ last = PREVTXD(di->txout);
+
+ /* move entries starting at last and moving backwards to first */
+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
+ new = TXD(old + rot);
+
+ /*
+ * Move the tx dma descriptor.
+ * EOT is set only in the last entry in the ring.
+ */
+ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
+ if (new == (di->ntxd - 1))
+ w |= D64_CTRL1_EOT;
+ W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
+
+ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
+ W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
+
+ W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
+ W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
+
+ /* zap the old tx dma descriptor address field */
+ W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
+ W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
+
+ /* move the corresponding txp[] entry */
+ ASSERT(di->txp[new] == NULL);
+ di->txp[new] = di->txp[old];
+
+ /* Move the map */
+ if (DMASGLIST_ENAB) {
+ bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
+ sizeof(hnddma_seg_map_t));
+ bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+ }
+
+ di->txp[old] = NULL;
+ }
+
+ /* update txin and txout */
+ di->txin = ad;
+ di->txout = TXD(di->txout + rot);
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ /* kick the chip */
+ W_REG(di->osh, &di->d64txregs->ptr,
+ di->xmtptrbase + I2B(di->txout, dma64dd_t));
+}
+
+uint dma_addrwidth(si_t * sih, void *dmaregs)
+{
+ dma32regs_t *dma32regs;
+ osl_t *osh;
+
+ osh = si_osh(sih);
+
+ /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
+ /* DMA engine is 64-bit capable */
+ if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
+ /* backplane is 64-bit capable */
+ if (si_backplane64(sih))
+ /* If bus is System Backplane or PCIE then we can access 64-bits */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) ||
+ ((BUSTYPE(sih->bustype) == PCI_BUS) &&
+ (sih->buscoretype == PCIE_CORE_ID)))
+ return (DMADDRWIDTH_64);
+
+ /* DMA64 is always 32-bit capable, AE is always TRUE */
+ ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));
+
+ return (DMADDRWIDTH_32);
+ }
+
+ /* Start checking for 32-bit / 30-bit addressing */
+ dma32regs = (dma32regs_t *) dmaregs;
+
+ /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) ||
+ ((BUSTYPE(sih->bustype) == PCI_BUS)
+ && sih->buscoretype == PCIE_CORE_ID)
+ || (_dma32_addrext(osh, dma32regs)))
+ return (DMADDRWIDTH_32);
+
+ /* Fallthru */
+ return (DMADDRWIDTH_30);
+}
diff --git a/drivers/staging/brcm80211/util/hndpmu.c b/drivers/staging/brcm80211/util/hndpmu.c
new file mode 100644
index 000000000000..5fca01e39e7d
--- /dev/null
+++ b/drivers/staging/brcm80211/util/hndpmu.c
@@ -0,0 +1,2681 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <hndpmu.h>
+#include "siutils_priv.h"
+
+#define PMU_ERROR(args)
+
+#ifdef BCMDBG
+#define PMU_MSG(args) printf args
+#else
+#define PMU_MSG(args)
+#endif /* BCMDBG */
+
+/* For verbose debugging messages that are not intended
+ * to be enabled except on private builds.
+ */
+#define PMU_NONE(args)
+
+/* PLL controls/clocks */
+static void si_pmu1_pllinit0(si_t * sih, osl_t * osh, chipcregs_t * cc,
+ uint32 xtal);
+static uint32 si_pmu1_cpuclk0(si_t * sih, osl_t * osh, chipcregs_t * cc);
+static uint32 si_pmu1_alpclk0(si_t * sih, osl_t * osh, chipcregs_t * cc);
+
+/* PMU resources */
+static bool si_pmu_res_depfltr_bb(si_t * sih);
+static bool si_pmu_res_depfltr_ncb(si_t * sih);
+static bool si_pmu_res_depfltr_paldo(si_t * sih);
+static bool si_pmu_res_depfltr_npaldo(si_t * sih);
+static uint32 si_pmu_res_deps(si_t * sih, osl_t * osh, chipcregs_t * cc,
+ uint32 rsrcs, bool all);
+static uint si_pmu_res_uptime(si_t * sih, osl_t * osh, chipcregs_t * cc,
+ uint8 rsrc);
+static void si_pmu_res_masks(si_t * sih, uint32 * pmin, uint32 * pmax);
+static void si_pmu_spuravoid_pllupdate(si_t * sih, chipcregs_t * cc,
+ osl_t * osh, uint8 spuravoid);
+
+static void si_pmu_set_4330_plldivs(si_t * sih);
+
+/* FVCO frequency */
+#define FVCO_880 880000 /* 880MHz */
+#define FVCO_1760 1760000 /* 1760MHz */
+#define FVCO_1440 1440000 /* 1440MHz */
+#define FVCO_960 960000 /* 960MHz */
+
+/* Read/write a chipcontrol reg */
+uint32 si_pmu_chipcontrol(si_t * sih, uint reg, uint32 mask, uint32 val)
+{
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol_addr), ~0,
+ reg);
+ return si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol_data), mask, val);
+}
+
+/* Read/write a regcontrol reg */
+uint32 si_pmu_regcontrol(si_t * sih, uint reg, uint32 mask, uint32 val)
+{
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, regcontrol_addr), ~0,
+ reg);
+ return si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, regcontrol_data), mask, val);
+}
+
+/* Read/write a pllcontrol reg */
+uint32 si_pmu_pllcontrol(si_t * sih, uint reg, uint32 mask, uint32 val)
+{
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pllcontrol_addr), ~0,
+ reg);
+ return si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, pllcontrol_data), mask, val);
+}
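+
+/* The three accessors above share one indirect-register pattern: the index
+ * is first written to the *_addr register, then the value is read-modify-
+ * written through the *_data register under "mask".  Usage sketch (register
+ * index and bit values are illustrative only, and a zero mask is assumed to
+ * behave as a pure read, as si_corereg() does elsewhere in siutils):
+ *
+ *	cur = si_pmu_chipcontrol(sih, 1, 0, 0);     read back reg 1
+ *	si_pmu_chipcontrol(sih, 1, 0x8, 0x8);       set bit 3 of reg 1
+ */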
+
+/* PMU PLL update */
+void si_pmu_pllupd(si_t * sih)
+{
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+}
+
+/* Setup switcher voltage */
+void
+BCMATTACHFN(si_pmu_set_switcher_voltage) (si_t * sih, osl_t * osh,
+ uint8 bb_voltage, uint8 rf_voltage) {
+ chipcregs_t *cc;
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ W_REG(osh, &cc->regcontrol_addr, 0x01);
+ W_REG(osh, &cc->regcontrol_data, (uint32) (bb_voltage & 0x1f) << 22);
+
+ W_REG(osh, &cc->regcontrol_addr, 0x00);
+ W_REG(osh, &cc->regcontrol_data, (uint32) (rf_voltage & 0x1f) << 14);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void
+BCMATTACHFN(si_pmu_set_ldo_voltage) (si_t * sih, osl_t * osh, uint8 ldo,
+ uint8 voltage) {
+ uint8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0;
+ uint8 addr = 0;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4336_CHIP_ID:
+ switch (ldo) {
+ case SET_LDO_VOLTAGE_CLDO_PWM:
+ addr = 4;
+ rc_shift = 1;
+ mask = 0xf;
+ break;
+ case SET_LDO_VOLTAGE_CLDO_BURST:
+ addr = 4;
+ rc_shift = 5;
+ mask = 0xf;
+ break;
+ case SET_LDO_VOLTAGE_LNLDO1:
+ addr = 4;
+ rc_shift = 17;
+ mask = 0xf;
+ break;
+ default:
+ ASSERT(FALSE);
+ return;
+ }
+ break;
+ case BCM4330_CHIP_ID:
+ switch (ldo) {
+ case SET_LDO_VOLTAGE_CBUCK_PWM:
+ addr = 3;
+ rc_shift = 0;
+ mask = 0x1f;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+ break;
+ default:
+ ASSERT(FALSE);
+ return;
+ }
+
+ shift = sr_cntl_shift + rc_shift;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, regcontrol_addr),
+ ~0, addr);
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, regcontrol_data),
+ mask << shift, (voltage & mask) << shift);
+}
+
+/* d11 slow to fast clock transition time in slow clock cycles */
+#define D11SCC_SLOW2FAST_TRANSITION 2
+
+uint16 BCMINITFN(si_pmu_fast_pwrup_delay) (si_t * sih, osl_t * osh) {
+ uint delay = PMU_MAX_TRANSITION_DLY;
+ chipcregs_t *cc;
+ uint origidx;
+#ifdef BCMDBG
+ char chn[8];
+ chn[0] = 0; /* to suppress compile error */
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ case BCM4313_CHIP_ID:
+ delay = ISSIM_ENAB(sih) ? 70 : 3700;
+ break;
+ case BCM4329_CHIP_ID:
+ if (ISSIM_ENAB(sih))
+ delay = 70;
+ else {
+ uint32 ilp = si_ilp_clock(sih);
+ delay =
+ (si_pmu_res_uptime(sih, osh, cc, RES4329_HT_AVAIL) +
+ D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
+ 1) / ilp);
+ delay = (11 * delay) / 10;
+ }
+ break;
+ case BCM4319_CHIP_ID:
+ delay = ISSIM_ENAB(sih) ? 70 : 3700;
+ break;
+ case BCM4336_CHIP_ID:
+ if (ISSIM_ENAB(sih))
+ delay = 70;
+ else {
+ uint32 ilp = si_ilp_clock(sih);
+ delay =
+ (si_pmu_res_uptime(sih, osh, cc, RES4336_HT_AVAIL) +
+ D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
+ 1) / ilp);
+ delay = (11 * delay) / 10;
+ }
+ break;
+ case BCM4330_CHIP_ID:
+ if (ISSIM_ENAB(sih))
+ delay = 70;
+ else {
+ uint32 ilp = si_ilp_clock(sih);
+ delay =
+ (si_pmu_res_uptime(sih, osh, cc, RES4330_HT_AVAIL) +
+ D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
+ 1) / ilp);
+ delay = (11 * delay) / 10;
+ }
+ break;
+ default:
+ break;
+ }
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return (uint16) delay;
+}
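+
+/* Worked example of the 4329/4336/4330 branch above: with an ILP clock of
+ * 32768 Hz, (1000000 + ilp - 1) / ilp == 31 us per slow-clock tick (rounded
+ * up).  If the HT_AVAIL uptime plus D11SCC_SLOW2FAST_TRANSITION comes to 60
+ * ticks, delay == 60 * 31 == 1860 us, padded by 10% to 2046 us.
+ */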
+
+uint32 BCMATTACHFN(si_pmu_force_ilp) (si_t * sih, osl_t * osh, bool force) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 oldpmucontrol;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ oldpmucontrol = R_REG(osh, &cc->pmucontrol);
+ if (force)
+ W_REG(osh, &cc->pmucontrol, oldpmucontrol &
+ ~(PCTL_HT_REQ_EN | PCTL_ALP_REQ_EN));
+ else
+ W_REG(osh, &cc->pmucontrol, oldpmucontrol |
+ (PCTL_HT_REQ_EN | PCTL_ALP_REQ_EN));
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return oldpmucontrol;
+}
+
+/* Setup resource up/down timers */
+typedef struct {
+ uint8 resnum;
+ uint16 updown;
+} pmu_res_updown_t;
+
+/* Change resource dependencies masks */
+typedef struct {
+ uint32 res_mask; /* resources (chip specific) */
+ int8 action; /* action */
+	uint32 depend_mask;	/* changes to the dependencies mask */
+	 bool(*filter) (si_t * sih);	/* action is taken when filter is NULL or returns TRUE */
+} pmu_res_depend_t;
+
+/* Resource dependencies mask change action */
+#define RES_DEPEND_SET		0	/* Override the dependencies mask */
+#define RES_DEPEND_ADD		1	/* Add to the dependencies mask */
+#define RES_DEPEND_REMOVE	-1	/* Remove from the dependencies mask */
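+
+/*
+ * si_pmu_res_init() walks these tables: for every resource whose bit is set
+ * in res_mask it selects the resource through res_table_sel and then, based
+ * on action, overwrites, ORs or ANDs depend_mask into res_dep_mask, but only
+ * when the optional filter callback is NULL or returns TRUE.
+ */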
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4328a0_res_updown)[] =
+{
+ {
+ RES4328_EXT_SWITCHER_PWM, 0x0101}, {
+ RES4328_BB_SWITCHER_PWM, 0x1f01}, {
+ RES4328_BB_SWITCHER_BURST, 0x010f}, {
+ RES4328_BB_EXT_SWITCHER_BURST, 0x0101}, {
+ RES4328_ILP_REQUEST, 0x0202}, {
+ RES4328_RADIO_SWITCHER_PWM, 0x0f01}, {
+ RES4328_RADIO_SWITCHER_BURST, 0x0f01}, {
+ RES4328_ROM_SWITCH, 0x0101}, {
+ RES4328_PA_REF_LDO, 0x0f01}, {
+ RES4328_RADIO_LDO, 0x0f01}, {
+ RES4328_AFE_LDO, 0x0f01}, {
+ RES4328_PLL_LDO, 0x0f01}, {
+ RES4328_BG_FILTBYP, 0x0101}, {
+ RES4328_TX_FILTBYP, 0x0101}, {
+ RES4328_RX_FILTBYP, 0x0101}, {
+ RES4328_XTAL_PU, 0x0101}, {
+ RES4328_XTAL_EN, 0xa001}, {
+ RES4328_BB_PLL_FILTBYP, 0x0101}, {
+ RES4328_RF_PLL_FILTBYP, 0x0101}, {
+ RES4328_BB_PLL_PU, 0x0701}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4328a0_res_depend)[] =
+{
+ /* Adjust ILP request resource not to force ext/BB switchers into burst mode */
+ {
+ PMURES_BIT(RES4328_ILP_REQUEST),
+ RES_DEPEND_SET,
+ PMURES_BIT(RES4328_EXT_SWITCHER_PWM) |
+ PMURES_BIT(RES4328_BB_SWITCHER_PWM), NULL}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4325a0_res_updown_qt)[] =
+{
+ {
+ RES4325_HT_AVAIL, 0x0300}, {
+ RES4325_BBPLL_PWRSW_PU, 0x0101}, {
+ RES4325_RFPLL_PWRSW_PU, 0x0101}, {
+ RES4325_ALP_AVAIL, 0x0100}, {
+ RES4325_XTAL_PU, 0x1000}, {
+ RES4325_LNLDO1_PU, 0x0800}, {
+ RES4325_CLDO_CBUCK_PWM, 0x0101}, {
+ RES4325_CBUCK_PWM, 0x0803}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4325a0_res_updown)[] =
+{
+ {
+ RES4325_XTAL_PU, 0x1501}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4325a0_res_depend)[] =
+{
+ /* Adjust OTP PU resource dependencies - remove BB BURST */
+ {
+ PMURES_BIT(RES4325_OTP_PU),
+ RES_DEPEND_REMOVE,
+ PMURES_BIT(RES4325_BUCK_BOOST_BURST), NULL},
+	/* Adjust ALP/HT Avail resource dependencies - bring up BB along with them if it is used. */
+ {
+ PMURES_BIT(RES4325_ALP_AVAIL) | PMURES_BIT(RES4325_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4325_BUCK_BOOST_BURST) |
+ PMURES_BIT(RES4325_BUCK_BOOST_PWM), si_pmu_res_depfltr_bb},
+ /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
+ {
+ PMURES_BIT(RES4325_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4325_RX_PWRSW_PU) |
+ PMURES_BIT(RES4325_TX_PWRSW_PU) |
+ PMURES_BIT(RES4325_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4325_AFE_PWRSW_PU), NULL},
+	/* Adjust ALL resource dependencies - remove CBUCK dependencies if it is not used. */
+ {
+ PMURES_BIT(RES4325_ILP_REQUEST) |
+ PMURES_BIT(RES4325_ABUCK_BURST) |
+ PMURES_BIT(RES4325_ABUCK_PWM) |
+ PMURES_BIT(RES4325_LNLDO1_PU) |
+ PMURES_BIT(RES4325C1_LNLDO2_PU) |
+ PMURES_BIT(RES4325_XTAL_PU) |
+ PMURES_BIT(RES4325_ALP_AVAIL) |
+ PMURES_BIT(RES4325_RX_PWRSW_PU) |
+ PMURES_BIT(RES4325_TX_PWRSW_PU) |
+ PMURES_BIT(RES4325_RFPLL_PWRSW_PU) |
+ PMURES_BIT(RES4325_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4325_AFE_PWRSW_PU) |
+ PMURES_BIT(RES4325_BBPLL_PWRSW_PU) |
+ PMURES_BIT(RES4325_HT_AVAIL), RES_DEPEND_REMOVE,
+ PMURES_BIT(RES4325B0_CBUCK_LPOM) |
+ PMURES_BIT(RES4325B0_CBUCK_BURST) |
+ PMURES_BIT(RES4325B0_CBUCK_PWM), si_pmu_res_depfltr_ncb}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4315a0_res_updown_qt)[] =
+{
+ {
+ RES4315_HT_AVAIL, 0x0101}, {
+ RES4315_XTAL_PU, 0x0100}, {
+ RES4315_LNLDO1_PU, 0x0100}, {
+ RES4315_PALDO_PU, 0x0100}, {
+ RES4315_CLDO_PU, 0x0100}, {
+ RES4315_CBUCK_PWM, 0x0100}, {
+ RES4315_CBUCK_BURST, 0x0100}, {
+ RES4315_CBUCK_LPOM, 0x0100}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4315a0_res_updown)[] =
+{
+ {
+ RES4315_XTAL_PU, 0x2501}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4315a0_res_depend)[] =
+{
+	/* Adjust OTP PU resource dependencies - no need for PALDO unless writing */
+ {
+ PMURES_BIT(RES4315_OTP_PU),
+ RES_DEPEND_REMOVE,
+ PMURES_BIT(RES4315_PALDO_PU), si_pmu_res_depfltr_npaldo},
+	/* Adjust ALP/HT Avail resource dependencies - bring up PALDO along with them if it is used. */
+ {
+ PMURES_BIT(RES4315_ALP_AVAIL) | PMURES_BIT(RES4315_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4315_PALDO_PU), si_pmu_res_depfltr_paldo},
+ /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
+ {
+ PMURES_BIT(RES4315_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4315_RX_PWRSW_PU) |
+ PMURES_BIT(RES4315_TX_PWRSW_PU) |
+ PMURES_BIT(RES4315_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4315_AFE_PWRSW_PU), NULL},
+	/* Adjust ALL resource dependencies - remove CBUCK dependencies if it is not used. */
+ {
+ PMURES_BIT(RES4315_CLDO_PU) | PMURES_BIT(RES4315_ILP_REQUEST) |
+ PMURES_BIT(RES4315_LNLDO1_PU) |
+ PMURES_BIT(RES4315_OTP_PU) |
+ PMURES_BIT(RES4315_LNLDO2_PU) |
+ PMURES_BIT(RES4315_XTAL_PU) |
+ PMURES_BIT(RES4315_ALP_AVAIL) |
+ PMURES_BIT(RES4315_RX_PWRSW_PU) |
+ PMURES_BIT(RES4315_TX_PWRSW_PU) |
+ PMURES_BIT(RES4315_RFPLL_PWRSW_PU) |
+ PMURES_BIT(RES4315_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4315_AFE_PWRSW_PU) |
+ PMURES_BIT(RES4315_BBPLL_PWRSW_PU) |
+ PMURES_BIT(RES4315_HT_AVAIL), RES_DEPEND_REMOVE,
+ PMURES_BIT(RES4315_CBUCK_LPOM) |
+ PMURES_BIT(RES4315_CBUCK_BURST) |
+ PMURES_BIT(RES4315_CBUCK_PWM), si_pmu_res_depfltr_ncb}
+};
+
+/* 4329 specific. Need to revisit this issue later. */
+static const pmu_res_updown_t BCMINITDATA(bcm4329_res_updown)[] =
+{
+ {
+ RES4329_XTAL_PU, 0x1501}
+};
+
+static const pmu_res_depend_t BCMINITDATA(bcm4329_res_depend)[] =
+{
+ /* Adjust HT Avail resource dependencies */
+ {
+ PMURES_BIT(RES4329_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4329_CBUCK_LPOM) |
+ PMURES_BIT(RES4329_CBUCK_BURST) |
+ PMURES_BIT(RES4329_CBUCK_PWM) |
+ PMURES_BIT(RES4329_CLDO_PU) |
+ PMURES_BIT(RES4329_PALDO_PU) |
+ PMURES_BIT(RES4329_LNLDO1_PU) |
+ PMURES_BIT(RES4329_XTAL_PU) |
+ PMURES_BIT(RES4329_ALP_AVAIL) |
+ PMURES_BIT(RES4329_RX_PWRSW_PU) |
+ PMURES_BIT(RES4329_TX_PWRSW_PU) |
+ PMURES_BIT(RES4329_RFPLL_PWRSW_PU) |
+ PMURES_BIT(RES4329_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4329_AFE_PWRSW_PU) |
+ PMURES_BIT(RES4329_BBPLL_PWRSW_PU), NULL}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4319a0_res_updown_qt)[] =
+{
+ {
+ RES4319_HT_AVAIL, 0x0101}, {
+ RES4319_XTAL_PU, 0x0100}, {
+ RES4319_LNLDO1_PU, 0x0100}, {
+ RES4319_PALDO_PU, 0x0100}, {
+ RES4319_CLDO_PU, 0x0100}, {
+ RES4319_CBUCK_PWM, 0x0100}, {
+ RES4319_CBUCK_BURST, 0x0100}, {
+ RES4319_CBUCK_LPOM, 0x0100}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4319a0_res_updown)[] =
+{
+ {
+ RES4319_XTAL_PU, 0x3f01}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4319a0_res_depend)[] =
+{
+	/* Adjust OTP PU resource dependencies - no need for PALDO unless writing */
+ {
+ PMURES_BIT(RES4319_OTP_PU),
+ RES_DEPEND_REMOVE,
+ PMURES_BIT(RES4319_PALDO_PU), si_pmu_res_depfltr_npaldo},
+	/* Adjust HT Avail resource dependencies - bring up PALDO along with it if it is used. */
+ {
+ PMURES_BIT(RES4319_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4319_PALDO_PU), si_pmu_res_depfltr_paldo},
+ /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
+ {
+ PMURES_BIT(RES4319_HT_AVAIL),
+ RES_DEPEND_ADD,
+ PMURES_BIT(RES4319_RX_PWRSW_PU) |
+ PMURES_BIT(RES4319_TX_PWRSW_PU) |
+ PMURES_BIT(RES4319_RFPLL_PWRSW_PU) |
+ PMURES_BIT(RES4319_LOGEN_PWRSW_PU) |
+ PMURES_BIT(RES4319_AFE_PWRSW_PU), NULL}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4336a0_res_updown_qt)[] =
+{
+ {
+ RES4336_HT_AVAIL, 0x0101}, {
+ RES4336_XTAL_PU, 0x0100}, {
+ RES4336_CLDO_PU, 0x0100}, {
+ RES4336_CBUCK_PWM, 0x0100}, {
+ RES4336_CBUCK_BURST, 0x0100}, {
+ RES4336_CBUCK_LPOM, 0x0100}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4336a0_res_updown)[] =
+{
+ {
+ RES4336_HT_AVAIL, 0x0D01}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4336a0_res_depend)[] =
+{
+ /* Just a dummy entry for now */
+ {
+ PMURES_BIT(RES4336_RSVD), RES_DEPEND_ADD, 0, NULL}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4330a0_res_updown_qt)[] =
+{
+ {
+ RES4330_HT_AVAIL, 0x0101}, {
+ RES4330_XTAL_PU, 0x0100}, {
+ RES4330_CLDO_PU, 0x0100}, {
+ RES4330_CBUCK_PWM, 0x0100}, {
+ RES4330_CBUCK_BURST, 0x0100}, {
+ RES4330_CBUCK_LPOM, 0x0100}
+};
+
+static const pmu_res_updown_t BCMATTACHDATA(bcm4330a0_res_updown)[] =
+{
+ {
+ RES4330_HT_AVAIL, 0x0e02}
+};
+
+static const pmu_res_depend_t BCMATTACHDATA(bcm4330a0_res_depend)[] =
+{
+ /* Just a dummy entry for now */
+ {
+ PMURES_BIT(RES4330_HT_AVAIL), RES_DEPEND_ADD, 0, NULL}
+};
+
+/* TRUE if the power topology uses the buck boost to provide 3.3V to VDDIO_RF and WLAN PA */
+static bool BCMATTACHFN(si_pmu_res_depfltr_bb) (si_t * sih) {
+ return (sih->boardflags & BFL_BUCKBOOST) != 0;
+}
+
+/* TRUE if the power topology doesn't use the cbuck. Key on chiprev also if the chip is BCM4325. */
+static bool BCMATTACHFN(si_pmu_res_depfltr_ncb) (si_t * sih) {
+
+ return ((sih->boardflags & BFL_NOCBUCK) != 0);
+}
+
+/* TRUE if the power topology uses the PALDO */
+static bool BCMATTACHFN(si_pmu_res_depfltr_paldo) (si_t * sih) {
+ return (sih->boardflags & BFL_PALDO) != 0;
+}
+
+/* TRUE if the power topology doesn't use the PALDO */
+static bool BCMATTACHFN(si_pmu_res_depfltr_npaldo) (si_t * sih) {
+ return (sih->boardflags & BFL_PALDO) == 0;
+}
+
+#define BCM94325_BBVDDIOSD_BOARDS(sih) (sih->boardtype == BCM94325DEVBU_BOARD || \
+ sih->boardtype == BCM94325BGABU_BOARD)
+
+/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
+static void si_pmu_res_masks(si_t * sih, uint32 * pmin, uint32 * pmax)
+{
+ uint32 min_mask = 0, max_mask = 0;
+ uint rsrcs;
+ char *val;
+
+ /* # resources */
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+
+ /* determine min/max rsrc masks */
+ switch (CHIPID(sih->chip)) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ /* ??? */
+ break;
+
+ case BCM4329_CHIP_ID:
+		/* 4329 specific issue. Need to revisit this later. */
+		/* Keep resources down to save power. */
+ min_mask =
+ PMURES_BIT(RES4329_CBUCK_LPOM) |
+ PMURES_BIT(RES4329_CLDO_PU);
+ /* Allow (but don't require) PLL to turn on */
+ max_mask = 0x3ff63e;
+ break;
+ case BCM4319_CHIP_ID:
+ /* We only need a few resources to be kept on all the time */
+ min_mask = PMURES_BIT(RES4319_CBUCK_LPOM) |
+ PMURES_BIT(RES4319_CLDO_PU);
+
+ /* Allow everything else to be turned on upon requests */
+ max_mask = ~(~0 << rsrcs);
+ break;
+ case BCM4336_CHIP_ID:
+		/* Keep resources down to save power. */
+ min_mask =
+ PMURES_BIT(RES4336_CBUCK_LPOM) | PMURES_BIT(RES4336_CLDO_PU)
+ | PMURES_BIT(RES4336_LDO3P3_PU) | PMURES_BIT(RES4336_OTP_PU)
+ | PMURES_BIT(RES4336_DIS_INT_RESET_PD);
+ /* Allow (but don't require) PLL to turn on */
+ max_mask = 0x1ffffff;
+ break;
+
+ case BCM4330_CHIP_ID:
+		/* Keep resources down to save power. */
+ min_mask =
+ PMURES_BIT(RES4330_CBUCK_LPOM) | PMURES_BIT(RES4330_CLDO_PU)
+ | PMURES_BIT(RES4330_DIS_INT_RESET_PD) |
+ PMURES_BIT(RES4330_LDO3P3_PU) | PMURES_BIT(RES4330_OTP_PU);
+ /* Allow (but don't require) PLL to turn on */
+ max_mask = 0xfffffff;
+ break;
+
+ case BCM4313_CHIP_ID:
+ min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
+ PMURES_BIT(RES4313_XTAL_PU_RSRC) |
+ PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
+ PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
+ max_mask = 0xffff;
+ break;
+ default:
+ break;
+ }
+
+ /* Apply nvram override to min mask */
+ if ((val = getvar(NULL, "rmin")) != NULL) {
+ PMU_MSG(("Applying rmin=%s to min_mask\n", val));
+ min_mask = (uint32) bcm_strtoul(val, NULL, 0);
+ }
+ /* Apply nvram override to max mask */
+ if ((val = getvar(NULL, "rmax")) != NULL) {
+ PMU_MSG(("Applying rmax=%s to max_mask\n", val));
+ max_mask = (uint32) bcm_strtoul(val, NULL, 0);
+ }
+
+ *pmin = min_mask;
+ *pmax = max_mask;
+}
+
+/* initialize PMU resources */
+void BCMATTACHFN(si_pmu_res_init) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ const pmu_res_updown_t *pmu_res_updown_table = NULL;
+ uint pmu_res_updown_table_sz = 0;
+ const pmu_res_depend_t *pmu_res_depend_table = NULL;
+ uint pmu_res_depend_table_sz = 0;
+ uint32 min_mask = 0, max_mask = 0;
+ char name[8], *val;
+ uint i, rsrcs;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ /* Optimize resources up/down timers */
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = NULL;
+ pmu_res_updown_table_sz = 0;
+ } else {
+ pmu_res_updown_table = bcm4329_res_updown;
+ pmu_res_updown_table_sz = ARRAYSIZE(bcm4329_res_updown);
+ }
+		/* Optimize resource dependencies */
+ pmu_res_depend_table = bcm4329_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4329_res_depend);
+ break;
+
+ case BCM4319_CHIP_ID:
+ /* Optimize resources up/down timers */
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4319a0_res_updown_qt;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4319a0_res_updown_qt);
+ } else {
+ pmu_res_updown_table = bcm4319a0_res_updown;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4319a0_res_updown);
+ }
+		/* Optimize resource dependencies masks */
+ pmu_res_depend_table = bcm4319a0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4319a0_res_depend);
+ break;
+
+ case BCM4336_CHIP_ID:
+ /* Optimize resources up/down timers */
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4336a0_res_updown_qt;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4336a0_res_updown_qt);
+ } else {
+ pmu_res_updown_table = bcm4336a0_res_updown;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4336a0_res_updown);
+ }
+		/* Optimize resource dependencies masks */
+ pmu_res_depend_table = bcm4336a0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4336a0_res_depend);
+ break;
+
+ case BCM4330_CHIP_ID:
+ /* Optimize resources up/down timers */
+ if (ISSIM_ENAB(sih)) {
+ pmu_res_updown_table = bcm4330a0_res_updown_qt;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4330a0_res_updown_qt);
+ } else {
+ pmu_res_updown_table = bcm4330a0_res_updown;
+ pmu_res_updown_table_sz =
+ ARRAYSIZE(bcm4330a0_res_updown);
+ }
+		/* Optimize resource dependencies masks */
+ pmu_res_depend_table = bcm4330a0_res_depend;
+ pmu_res_depend_table_sz = ARRAYSIZE(bcm4330a0_res_depend);
+ break;
+
+ default:
+ break;
+ }
+
+ /* # resources */
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+
+ /* Program up/down timers */
+ while (pmu_res_updown_table_sz--) {
+ ASSERT(pmu_res_updown_table != NULL);
+ PMU_MSG(("Changing rsrc %d res_updn_timer to 0x%x\n",
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown));
+ W_REG(osh, &cc->res_table_sel,
+ pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
+ W_REG(osh, &cc->res_updn_timer,
+ pmu_res_updown_table[pmu_res_updown_table_sz].updown);
+ }
+ /* Apply nvram overrides to up/down timers */
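+	/* The NVRAM variable "r<N>t" overrides the up/down timer of resource
+	 * N; "r<N>d" below similarly overrides its dependency mask.
+	 */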
+ for (i = 0; i < rsrcs; i++) {
+ snprintf(name, sizeof(name), "r%dt", i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ PMU_MSG(("Applying %s=%s to rsrc %d res_updn_timer\n", name,
+ val, i));
+ W_REG(osh, &cc->res_table_sel, (uint32) i);
+ W_REG(osh, &cc->res_updn_timer,
+ (uint32) bcm_strtoul(val, NULL, 0));
+ }
+
+ /* Program resource dependencies table */
+ while (pmu_res_depend_table_sz--) {
+ ASSERT(pmu_res_depend_table != NULL);
+ if (pmu_res_depend_table[pmu_res_depend_table_sz].filter != NULL
+ && !(pmu_res_depend_table[pmu_res_depend_table_sz].
+ filter) (sih))
+ continue;
+ for (i = 0; i < rsrcs; i++) {
+ if ((pmu_res_depend_table[pmu_res_depend_table_sz].
+ res_mask & PMURES_BIT(i)) == 0)
+ continue;
+ W_REG(osh, &cc->res_table_sel, i);
+ switch (pmu_res_depend_table[pmu_res_depend_table_sz].
+ action) {
+ case RES_DEPEND_SET:
+ PMU_MSG(("Changing rsrc %d res_dep_mask to 0x%x\n", i, pmu_res_depend_table[pmu_res_depend_table_sz].depend_mask));
+ W_REG(osh, &cc->res_dep_mask,
+ pmu_res_depend_table
+ [pmu_res_depend_table_sz].depend_mask);
+ break;
+ case RES_DEPEND_ADD:
+ PMU_MSG(("Adding 0x%x to rsrc %d res_dep_mask\n", pmu_res_depend_table[pmu_res_depend_table_sz].depend_mask, i));
+ OR_REG(osh, &cc->res_dep_mask,
+ pmu_res_depend_table
+ [pmu_res_depend_table_sz].depend_mask);
+ break;
+ case RES_DEPEND_REMOVE:
+ PMU_MSG(("Removing 0x%x from rsrc %d res_dep_mask\n", pmu_res_depend_table[pmu_res_depend_table_sz].depend_mask, i));
+ AND_REG(osh, &cc->res_dep_mask,
+ ~pmu_res_depend_table
+ [pmu_res_depend_table_sz].depend_mask);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ }
+	/* Apply nvram overrides to dependency masks */
+ for (i = 0; i < rsrcs; i++) {
+ snprintf(name, sizeof(name), "r%dd", i);
+ if ((val = getvar(NULL, name)) == NULL)
+ continue;
+ PMU_MSG(("Applying %s=%s to rsrc %d res_dep_mask\n", name, val,
+ i));
+ W_REG(osh, &cc->res_table_sel, (uint32) i);
+ W_REG(osh, &cc->res_dep_mask,
+ (uint32) bcm_strtoul(val, NULL, 0));
+ }
+
+ /* Determine min/max rsrc masks */
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+
+ /* It is required to program max_mask first and then min_mask */
+
+ /* Program max resource mask */
+
+ if (max_mask) {
+ PMU_MSG(("Changing max_res_mask to 0x%x\n", max_mask));
+ W_REG(osh, &cc->max_res_mask, max_mask);
+ }
+
+ /* Program min resource mask */
+
+ if (min_mask) {
+ PMU_MSG(("Changing min_res_mask to 0x%x\n", min_mask));
+ W_REG(osh, &cc->min_res_mask, min_mask);
+ }
+
+ /* Add some delay; allow resources to come up and settle. */
+ OSL_DELAY(2000);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* setup pll and query clock speed */
+typedef struct {
+ uint16 freq;
+ uint8 xf;
+ uint8 wbint;
+ uint32 wbfrac;
+} pmu0_xtaltab0_t;
+
+/* the following table is based on an 880 MHz fvco */
+static const pmu0_xtaltab0_t BCMINITDATA(pmu0_xtaltab0)[] =
+{
+ {
+ 12000, 1, 73, 349525}, {
+ 13000, 2, 67, 725937}, {
+ 14400, 3, 61, 116508}, {
+ 15360, 4, 57, 305834}, {
+ 16200, 5, 54, 336579}, {
+ 16800, 6, 52, 399457}, {
+ 19200, 7, 45, 873813}, {
+ 19800, 8, 44, 466033}, {
+ 20000, 9, 44, 0}, {
+ 25000, 10, 70, 419430}, {
+ 26000, 11, 67, 725937}, {
+ 30000, 12, 58, 699050}, {
+ 38400, 13, 45, 873813}, {
+ 40000, 14, 45, 0}, {
+ 0, 0, 0, 0}
+};
+
+#define PMU0_XTAL0_DEFAULT 8
+
+/* setup pll and query clock speed */
+typedef struct {
+ uint16 fref;
+ uint8 xf;
+ uint8 p1div;
+ uint8 p2div;
+ uint8 ndiv_int;
+ uint32 ndiv_frac;
+} pmu1_xtaltab0_t;
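+
+/*
+ * For these tables fvco = fref * (ndiv_int + ndiv_frac / 2^24) * p2div / p1div
+ * (this is the calculation used under BCMDBG in si_pmu1_cpuclk0).  For
+ * example, the 880 MHz table entry for 13000 kHz gives
+ * 13000 * (0xb + 0x483483 / 2^24) * 6 / 1 ~= 880000 kHz.
+ */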
+
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_880_4329)[] =
+{
+ {
+ 12000, 1, 3, 22, 0x9, 0xFFFFEF}, {
+ 13000, 2, 1, 6, 0xb, 0x483483}, {
+ 14400, 3, 1, 10, 0xa, 0x1C71C7}, {
+ 15360, 4, 1, 5, 0xb, 0x755555}, {
+ 16200, 5, 1, 10, 0x5, 0x6E9E06}, {
+ 16800, 6, 1, 10, 0x5, 0x3Cf3Cf}, {
+ 19200, 7, 1, 4, 0xb, 0x755555}, {
+ 19800, 8, 1, 11, 0x4, 0xA57EB}, {
+ 20000, 9, 1, 11, 0x4, 0x0}, {
+ 24000, 10, 3, 11, 0xa, 0x0}, {
+ 25000, 11, 5, 16, 0xb, 0x0}, {
+ 26000, 12, 1, 1, 0x21, 0xD89D89}, {
+ 30000, 13, 3, 8, 0xb, 0x0}, {
+ 37400, 14, 3, 1, 0x46, 0x969696}, {
+ 38400, 15, 1, 1, 0x16, 0xEAAAAA}, {
+ 40000, 16, 1, 2, 0xb, 0}, {
+ 0, 0, 0, 0, 0, 0}
+};
+
+/* the following table is based on an 880 MHz fvco */
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_880)[] =
+{
+ {
+ 12000, 1, 3, 22, 0x9, 0xFFFFEF}, {
+ 13000, 2, 1, 6, 0xb, 0x483483}, {
+ 14400, 3, 1, 10, 0xa, 0x1C71C7}, {
+ 15360, 4, 1, 5, 0xb, 0x755555}, {
+ 16200, 5, 1, 10, 0x5, 0x6E9E06}, {
+ 16800, 6, 1, 10, 0x5, 0x3Cf3Cf}, {
+ 19200, 7, 1, 4, 0xb, 0x755555}, {
+ 19800, 8, 1, 11, 0x4, 0xA57EB}, {
+ 20000, 9, 1, 11, 0x4, 0x0}, {
+ 24000, 10, 3, 11, 0xa, 0x0}, {
+ 25000, 11, 5, 16, 0xb, 0x0}, {
+ 26000, 12, 1, 2, 0x10, 0xEC4EC4}, {
+ 30000, 13, 3, 8, 0xb, 0x0}, {
+ 33600, 14, 1, 2, 0xd, 0x186186}, {
+ 38400, 15, 1, 2, 0xb, 0x755555}, {
+ 40000, 16, 1, 2, 0xb, 0}, {
+ 0, 0, 0, 0, 0, 0}
+};
+
+#define PMU1_XTALTAB0_880_12000K 0
+#define PMU1_XTALTAB0_880_13000K 1
+#define PMU1_XTALTAB0_880_14400K 2
+#define PMU1_XTALTAB0_880_15360K 3
+#define PMU1_XTALTAB0_880_16200K 4
+#define PMU1_XTALTAB0_880_16800K 5
+#define PMU1_XTALTAB0_880_19200K 6
+#define PMU1_XTALTAB0_880_19800K 7
+#define PMU1_XTALTAB0_880_20000K 8
+#define PMU1_XTALTAB0_880_24000K 9
+#define PMU1_XTALTAB0_880_25000K 10
+#define PMU1_XTALTAB0_880_26000K 11
+#define PMU1_XTALTAB0_880_30000K 12
+#define PMU1_XTALTAB0_880_37400K 13
+#define PMU1_XTALTAB0_880_38400K 14
+#define PMU1_XTALTAB0_880_40000K 15
+
+/* the following table is based on a 1760 MHz fvco */
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_1760)[] =
+{
+ {
+ 12000, 1, 3, 44, 0x9, 0xFFFFEF}, {
+ 13000, 2, 1, 12, 0xb, 0x483483}, {
+ 14400, 3, 1, 20, 0xa, 0x1C71C7}, {
+ 15360, 4, 1, 10, 0xb, 0x755555}, {
+ 16200, 5, 1, 20, 0x5, 0x6E9E06}, {
+ 16800, 6, 1, 20, 0x5, 0x3Cf3Cf}, {
+ 19200, 7, 1, 18, 0x5, 0x17B425}, {
+ 19800, 8, 1, 22, 0x4, 0xA57EB}, {
+ 20000, 9, 1, 22, 0x4, 0x0}, {
+ 24000, 10, 3, 22, 0xa, 0x0}, {
+ 25000, 11, 5, 32, 0xb, 0x0}, {
+ 26000, 12, 1, 4, 0x10, 0xEC4EC4}, {
+ 30000, 13, 3, 16, 0xb, 0x0}, {
+ 38400, 14, 1, 10, 0x4, 0x955555}, {
+ 40000, 15, 1, 4, 0xb, 0}, {
+ 0, 0, 0, 0, 0, 0}
+};
+
+/* table index */
+#define PMU1_XTALTAB0_1760_12000K 0
+#define PMU1_XTALTAB0_1760_13000K 1
+#define PMU1_XTALTAB0_1760_14400K 2
+#define PMU1_XTALTAB0_1760_15360K 3
+#define PMU1_XTALTAB0_1760_16200K 4
+#define PMU1_XTALTAB0_1760_16800K 5
+#define PMU1_XTALTAB0_1760_19200K 6
+#define PMU1_XTALTAB0_1760_19800K 7
+#define PMU1_XTALTAB0_1760_20000K 8
+#define PMU1_XTALTAB0_1760_24000K 9
+#define PMU1_XTALTAB0_1760_25000K 10
+#define PMU1_XTALTAB0_1760_26000K 11
+#define PMU1_XTALTAB0_1760_30000K 12
+#define PMU1_XTALTAB0_1760_38400K 13
+#define PMU1_XTALTAB0_1760_40000K 14
+
+/* the following table is based on a 1440 MHz fvco */
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_1440)[] =
+{
+ {
+ 12000, 1, 1, 1, 0x78, 0x0}, {
+ 13000, 2, 1, 1, 0x6E, 0xC4EC4E}, {
+ 14400, 3, 1, 1, 0x64, 0x0}, {
+ 15360, 4, 1, 1, 0x5D, 0xC00000}, {
+ 16200, 5, 1, 1, 0x58, 0xE38E38}, {
+ 16800, 6, 1, 1, 0x55, 0xB6DB6D}, {
+ 19200, 7, 1, 1, 0x4B, 0}, {
+ 19800, 8, 1, 1, 0x48, 0xBA2E8B}, {
+ 20000, 9, 1, 1, 0x48, 0x0}, {
+ 25000, 10, 1, 1, 0x39, 0x999999}, {
+ 26000, 11, 1, 1, 0x37, 0x627627}, {
+ 30000, 12, 1, 1, 0x30, 0x0}, {
+ 37400, 13, 2, 1, 0x4D, 0x15E76}, {
+ 38400, 13, 2, 1, 0x4B, 0x0}, {
+ 40000, 14, 2, 1, 0x48, 0x0}, {
+ 48000, 15, 2, 1, 0x3c, 0x0}, {
+ 0, 0, 0, 0, 0, 0}
+};
+
+/* table index */
+#define PMU1_XTALTAB0_1440_12000K 0
+#define PMU1_XTALTAB0_1440_13000K 1
+#define PMU1_XTALTAB0_1440_14400K 2
+#define PMU1_XTALTAB0_1440_15360K 3
+#define PMU1_XTALTAB0_1440_16200K 4
+#define PMU1_XTALTAB0_1440_16800K 5
+#define PMU1_XTALTAB0_1440_19200K 6
+#define PMU1_XTALTAB0_1440_19800K 7
+#define PMU1_XTALTAB0_1440_20000K 8
+#define PMU1_XTALTAB0_1440_25000K 9
+#define PMU1_XTALTAB0_1440_26000K 10
+#define PMU1_XTALTAB0_1440_30000K 11
+#define PMU1_XTALTAB0_1440_37400K 12
+#define PMU1_XTALTAB0_1440_38400K 13
+#define PMU1_XTALTAB0_1440_40000K 14
+#define PMU1_XTALTAB0_1440_48000K 15
+
+#define XTAL_FREQ_24000MHZ 24000
+#define XTAL_FREQ_30000MHZ 30000
+#define XTAL_FREQ_37400MHZ 37400
+#define XTAL_FREQ_48000MHZ 48000
+
+static const pmu1_xtaltab0_t BCMINITDATA(pmu1_xtaltab0_960)[] =
+{
+ {
+ 12000, 1, 1, 1, 0x50, 0x0}, {
+ 13000, 2, 1, 1, 0x49, 0xD89D89}, {
+ 14400, 3, 1, 1, 0x42, 0xAAAAAA}, {
+ 15360, 4, 1, 1, 0x3E, 0x800000}, {
+ 16200, 5, 1, 1, 0x39, 0x425ED0}, {
+ 16800, 6, 1, 1, 0x39, 0x249249}, {
+ 19200, 7, 1, 1, 0x32, 0x0}, {
+ 19800, 8, 1, 1, 0x30, 0x7C1F07}, {
+ 20000, 9, 1, 1, 0x30, 0x0}, {
+ 25000, 10, 1, 1, 0x26, 0x666666}, {
+ 26000, 11, 1, 1, 0x24, 0xEC4EC4}, {
+ 30000, 12, 1, 1, 0x20, 0x0}, {
+ 37400, 13, 2, 1, 0x33, 0x563EF9}, {
+ 38400, 14, 2, 1, 0x32, 0x0}, {
+ 40000, 15, 2, 1, 0x30, 0x0}, {
+ 48000, 16, 2, 1, 0x28, 0x0}, {
+ 0, 0, 0, 0, 0, 0}
+};
+
+/* table index */
+#define PMU1_XTALTAB0_960_12000K 0
+#define PMU1_XTALTAB0_960_13000K 1
+#define PMU1_XTALTAB0_960_14400K 2
+#define PMU1_XTALTAB0_960_15360K 3
+#define PMU1_XTALTAB0_960_16200K 4
+#define PMU1_XTALTAB0_960_16800K 5
+#define PMU1_XTALTAB0_960_19200K 6
+#define PMU1_XTALTAB0_960_19800K 7
+#define PMU1_XTALTAB0_960_20000K 8
+#define PMU1_XTALTAB0_960_25000K 9
+#define PMU1_XTALTAB0_960_26000K 10
+#define PMU1_XTALTAB0_960_30000K 11
+#define PMU1_XTALTAB0_960_37400K 12
+#define PMU1_XTALTAB0_960_38400K 13
+#define PMU1_XTALTAB0_960_40000K 14
+#define PMU1_XTALTAB0_960_48000K 15
+
+/* select xtal table for each chip */
+static const pmu1_xtaltab0_t *BCMINITFN(si_pmu1_xtaltab0) (si_t * sih) {
+#ifdef BCMDBG
+ char chn[8];
+#endif
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ return pmu1_xtaltab0_880_4329;
+ case BCM4319_CHIP_ID:
+ return pmu1_xtaltab0_1440;
+ case BCM4336_CHIP_ID:
+ return pmu1_xtaltab0_960;
+ case BCM4330_CHIP_ID:
+ if (CST4330_CHIPMODE_SDIOD(sih->chipst))
+ return pmu1_xtaltab0_960;
+ else
+ return pmu1_xtaltab0_1440;
+ default:
+ PMU_MSG(("si_pmu1_xtaltab0: Unknown chipid %s\n",
+ bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return NULL;
+}
+
+/* select default xtal frequency for each chip */
+static const pmu1_xtaltab0_t *BCMINITFN(si_pmu1_xtaldef0) (si_t * sih) {
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+		/* Default to 38400 kHz */
+ return &pmu1_xtaltab0_880_4329[PMU1_XTALTAB0_880_38400K];
+ case BCM4319_CHIP_ID:
+		/* Default to 30000 kHz */
+ return &pmu1_xtaltab0_1440[PMU1_XTALTAB0_1440_30000K];
+ case BCM4336_CHIP_ID:
+		/* Default to 26000 kHz */
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_26000K];
+ case BCM4330_CHIP_ID:
+		/* Default to 37400 kHz */
+ if (CST4330_CHIPMODE_SDIOD(sih->chipst))
+ return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
+ else
+ return &pmu1_xtaltab0_1440[PMU1_XTALTAB0_1440_37400K];
+ default:
+ PMU_MSG(("si_pmu1_xtaldef0: Unknown chipid %s\n",
+ bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return NULL;
+}
+
+/* select default pll fvco for each chip */
+static uint32 BCMINITFN(si_pmu1_pllfvco0) (si_t * sih) {
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ return FVCO_880;
+ case BCM4319_CHIP_ID:
+ return FVCO_1440;
+ case BCM4336_CHIP_ID:
+ return FVCO_960;
+ case BCM4330_CHIP_ID:
+ if (CST4330_CHIPMODE_SDIOD(sih->chipst))
+ return FVCO_960;
+ else
+ return FVCO_1440;
+ default:
+ PMU_MSG(("si_pmu1_pllfvco0: Unknown chipid %s\n",
+ bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+ ASSERT(0);
+ return 0;
+}
+
+/* query alp/xtal clock frequency */
+static uint32
+BCMINITFN(si_pmu1_alpclk0) (si_t * sih, osl_t * osh, chipcregs_t * cc) {
+ const pmu1_xtaltab0_t *xt;
+ uint32 xf;
+
+ /* Find the frequency in the table */
+ xf = (R_REG(osh, &cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
+ PCTL_XTALFREQ_SHIFT;
+ for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt++)
+ if (xt->xf == xf)
+ break;
+ /* Could not find it so assign a default value */
+ if (xt == NULL || xt->fref == 0)
+ xt = si_pmu1_xtaldef0(sih);
+ ASSERT(xt != NULL && xt->fref != 0);
+
+ return xt->fref * 1000;
+}
+
+/* Set up PLL registers in the PMU as per the crystal speed.
+ * A zero XtalFreq field in the pmucontrol register indicates the PLL
+ * is not programmed and the h/w default is assumed to work, in which
+ * case the xtal frequency is unknown to the s/w, so si_pmu1_xtaldef0()
+ * must be called wherever a default value is needed.
+ */
+static void
+BCMATTACHFN(si_pmu1_pllinit0) (si_t * sih, osl_t * osh, chipcregs_t * cc,
+ uint32 xtal) {
+ const pmu1_xtaltab0_t *xt;
+ uint32 tmp;
+ uint32 buf_strength = 0;
+ uint8 ndiv_mode = 1;
+
+ /* Use h/w default PLL config */
+ if (xtal == 0) {
+ PMU_MSG(("Unspecified xtal frequency, skip PLL configuration\n"));
+ return;
+ }
+
+ /* Find the frequency in the table */
+ for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt++)
+ if (xt->fref == xtal)
+ break;
+
+ /* Check current PLL state, bail out if it has been programmed or
+ * we don't know how to program it.
+ */
+ if (xt == NULL || xt->fref == 0) {
+ PMU_MSG(("Unsupported xtal frequency %d.%d MHz, skip PLL configuration\n", xtal / 1000, xtal % 1000));
+ return;
+ }
+	/* For the 4319 the bootloader already programs the PLL but does not
+	 * program PLL4 and PLL5, so skip this check for the 4319.
+	 */
+ if ((((R_REG(osh, &cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
+ PCTL_XTALFREQ_SHIFT) == xt->xf) &&
+ !((CHIPID(sih->chip) == BCM4319_CHIP_ID)
+ || (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+ PMU_MSG(("PLL already programmed for %d.%d MHz\n",
+ xt->fref / 1000, xt->fref % 1000));
+ return;
+ }
+
+ PMU_MSG(("XTAL %d.%d MHz (%d)\n", xtal / 1000, xtal % 1000, xt->xf));
+ PMU_MSG(("Programming PLL for %d.%d MHz\n", xt->fref / 1000,
+ xt->fref % 1000));
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ /* Change the BBPLL drive strength to 8 for all channels */
+ buf_strength = 0x888888;
+ AND_REG(osh, &cc->min_res_mask,
+ ~(PMURES_BIT(RES4329_BBPLL_PWRSW_PU) |
+ PMURES_BIT(RES4329_HT_AVAIL)));
+ AND_REG(osh, &cc->max_res_mask,
+ ~(PMURES_BIT(RES4329_BBPLL_PWRSW_PU) |
+ PMURES_BIT(RES4329_HT_AVAIL)));
+ SPINWAIT(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL,
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL));
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ if (xt->fref == 38400)
+ tmp = 0x200024C0;
+ else if (xt->fref == 37400)
+ tmp = 0x20004500;
+ else if (xt->fref == 26000)
+ tmp = 0x200024C0;
+ else
+ tmp = 0x200005C0; /* Chip Dflt Settings */
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ tmp =
+ R_REG(osh,
+ &cc->pllcontrol_data) & PMU1_PLL0_PC5_CLK_DRV_MASK;
+ if ((xt->fref == 38400) || (xt->fref == 37400)
+ || (xt->fref == 26000))
+ tmp |= 0x15;
+ else
+ tmp |= 0x25; /* Chip Dflt Settings */
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+ break;
+
+ case BCM4319_CHIP_ID:
+ /* Change the BBPLL drive strength to 2 for all channels */
+ buf_strength = 0x222222;
+
+ /* Make sure the PLL is off */
+ /* WAR65104: Disable the HT_AVAIL resource first and then
+ * after a delay (more than downtime for HT_AVAIL) remove the
+ * BBPLL resource; backplane clock moves to ALP from HT.
+ */
+ AND_REG(osh, &cc->min_res_mask,
+ ~(PMURES_BIT(RES4319_HT_AVAIL)));
+ AND_REG(osh, &cc->max_res_mask,
+ ~(PMURES_BIT(RES4319_HT_AVAIL)));
+
+ OSL_DELAY(100);
+ AND_REG(osh, &cc->min_res_mask,
+ ~(PMURES_BIT(RES4319_BBPLL_PWRSW_PU)));
+ AND_REG(osh, &cc->max_res_mask,
+ ~(PMURES_BIT(RES4319_BBPLL_PWRSW_PU)));
+
+ OSL_DELAY(100);
+ SPINWAIT(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL,
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL));
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ tmp = 0x200005c0;
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+ break;
+
+ case BCM4336_CHIP_ID:
+ AND_REG(osh, &cc->min_res_mask,
+ ~(PMURES_BIT(RES4336_HT_AVAIL) |
+ PMURES_BIT(RES4336_MACPHY_CLKAVAIL)));
+ AND_REG(osh, &cc->max_res_mask,
+ ~(PMURES_BIT(RES4336_HT_AVAIL) |
+ PMURES_BIT(RES4336_MACPHY_CLKAVAIL)));
+ OSL_DELAY(100);
+ SPINWAIT(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL,
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL));
+ break;
+
+ case BCM4330_CHIP_ID:
+ AND_REG(osh, &cc->min_res_mask,
+ ~(PMURES_BIT(RES4330_HT_AVAIL) |
+ PMURES_BIT(RES4330_MACPHY_CLKAVAIL)));
+ AND_REG(osh, &cc->max_res_mask,
+ ~(PMURES_BIT(RES4330_HT_AVAIL) |
+ PMURES_BIT(RES4330_MACPHY_CLKAVAIL)));
+ OSL_DELAY(100);
+ SPINWAIT(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL,
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(!(R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL));
+ break;
+
+ default:
+ ASSERT(0);
+ }
+
+ PMU_MSG(("Done masking\n"));
+
+ /* Write p1div and p2div to pllcontrol[0] */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ tmp = R_REG(osh, &cc->pllcontrol_data) &
+ ~(PMU1_PLL0_PC0_P1DIV_MASK | PMU1_PLL0_PC0_P2DIV_MASK);
+ tmp |=
+ ((xt->
+ p1div << PMU1_PLL0_PC0_P1DIV_SHIFT) & PMU1_PLL0_PC0_P1DIV_MASK) |
+ ((xt->
+ p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) & PMU1_PLL0_PC0_P2DIV_MASK);
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+
+ if ((CHIPID(sih->chip) == BCM4330_CHIP_ID))
+ si_pmu_set_4330_plldivs(sih);
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
+ && (CHIPREV(sih->chiprev) == 0)) {
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ tmp = tmp & (~DOT11MAC_880MHZ_CLK_DIVISOR_MASK);
+ tmp = tmp | DOT11MAC_880MHZ_CLK_DIVISOR_VAL;
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+ }
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4330_CHIP_ID))
+ ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MFB;
+ else
+ ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
+
+ /* Write ndiv_int and ndiv_mode to pllcontrol[2] */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ tmp = R_REG(osh, &cc->pllcontrol_data) &
+ ~(PMU1_PLL0_PC2_NDIV_INT_MASK | PMU1_PLL0_PC2_NDIV_MODE_MASK);
+ tmp |=
+ ((xt->
+ ndiv_int << PMU1_PLL0_PC2_NDIV_INT_SHIFT) &
+ PMU1_PLL0_PC2_NDIV_INT_MASK) | ((ndiv_mode <<
+ PMU1_PLL0_PC2_NDIV_MODE_SHIFT) &
+ PMU1_PLL0_PC2_NDIV_MODE_MASK);
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+
+ /* Write ndiv_frac to pllcontrol[3] */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ tmp = R_REG(osh, &cc->pllcontrol_data) & ~PMU1_PLL0_PC3_NDIV_FRAC_MASK;
+ tmp |= ((xt->ndiv_frac << PMU1_PLL0_PC3_NDIV_FRAC_SHIFT) &
+ PMU1_PLL0_PC3_NDIV_FRAC_MASK);
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+
+ /* Write clock driving strength to pllcontrol[5] */
+ if (buf_strength) {
+ PMU_MSG(("Adjusting PLL buffer drive strength: %x\n",
+ buf_strength));
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ tmp =
+ R_REG(osh,
+ &cc->pllcontrol_data) & ~PMU1_PLL0_PC5_CLK_DRV_MASK;
+ tmp |= (buf_strength << PMU1_PLL0_PC5_CLK_DRV_SHIFT);
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+ }
+
+ PMU_MSG(("Done pll\n"));
+
+ /* to operate the 4319 usb in 24MHz/48MHz; chipcontrol[2][84:83] needs
+ * to be updated.
+ */
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID)
+ && (xt->fref != XTAL_FREQ_30000MHZ)) {
+ W_REG(osh, &cc->chipcontrol_addr, PMU1_PLL0_CHIPCTL2);
+ tmp =
+ R_REG(osh,
+ &cc->chipcontrol_data) & ~CCTL_4319USB_XTAL_SEL_MASK;
+ if (xt->fref == XTAL_FREQ_24000MHZ) {
+ tmp |=
+ (CCTL_4319USB_24MHZ_PLL_SEL <<
+ CCTL_4319USB_XTAL_SEL_SHIFT);
+ } else if (xt->fref == XTAL_FREQ_48000MHZ) {
+ tmp |=
+ (CCTL_4319USB_48MHZ_PLL_SEL <<
+ CCTL_4319USB_XTAL_SEL_SHIFT);
+ }
+ W_REG(osh, &cc->chipcontrol_data, tmp);
+ }
+
+ /* Flush deferred pll control registers writes */
+ if (sih->pmurev >= 2)
+ OR_REG(osh, &cc->pmucontrol, PCTL_PLL_PLLCTL_UPD);
+
+ /* Write XtalFreq. Set the divisor also. */
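+	/* The ILP divisor field is programmed to ((fref + 127) / 128) - 1,
+	 * e.g. 156 for a 20000 kHz crystal, which presumably divides the
+	 * crystal clock down to roughly 128 kHz.
+	 */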
+ tmp = R_REG(osh, &cc->pmucontrol) &
+ ~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK);
+ tmp |= (((((xt->fref + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) &
+ PCTL_ILP_DIV_MASK) |
+ ((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
+ && CHIPREV(sih->chiprev) == 0) {
+ /* clear the htstretch before clearing HTReqEn */
+ AND_REG(osh, &cc->clkstretch, ~CSTRETCH_HT);
+ tmp &= ~PCTL_HT_REQ_EN;
+ }
+
+ W_REG(osh, &cc->pmucontrol, tmp);
+}
+
+/* query the CPU clock frequency */
+static uint32
+BCMINITFN(si_pmu1_cpuclk0) (si_t * sih, osl_t * osh, chipcregs_t * cc) {
+ uint32 tmp, m1div;
+#ifdef BCMDBG
+ uint32 ndiv_int, ndiv_frac, p2div, p1div, fvco;
+ uint32 fref;
+#endif
+ uint32 FVCO = si_pmu1_pllfvco0(sih);
+
+ /* Read m1div from pllcontrol[1] */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ m1div = (tmp & PMU1_PLL0_PC1_M1DIV_MASK) >> PMU1_PLL0_PC1_M1DIV_SHIFT;
+
+#ifdef BCMDBG
+ /* Read p2div/p1div from pllcontrol[0] */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ p2div = (tmp & PMU1_PLL0_PC0_P2DIV_MASK) >> PMU1_PLL0_PC0_P2DIV_SHIFT;
+ p1div = (tmp & PMU1_PLL0_PC0_P1DIV_MASK) >> PMU1_PLL0_PC0_P1DIV_SHIFT;
+
+ /* Calculate fvco based on xtal freq and ndiv and pdiv */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ ndiv_int =
+ (tmp & PMU1_PLL0_PC2_NDIV_INT_MASK) >> PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ ndiv_frac =
+ (tmp & PMU1_PLL0_PC3_NDIV_FRAC_MASK) >>
+ PMU1_PLL0_PC3_NDIV_FRAC_SHIFT;
+
+ fref = si_pmu1_alpclk0(sih, osh, cc) / 1000;
+
+ fvco = (fref * ndiv_int) << 8;
+ fvco += (fref * (ndiv_frac >> 12)) >> 4;
+ fvco += (fref * (ndiv_frac & 0xfff)) >> 12;
+ fvco >>= 8;
+ fvco *= p2div;
+ fvco /= p1div;
+ fvco /= 1000;
+ fvco *= 1000;
+
+ PMU_MSG(("si_pmu1_cpuclk0: ndiv_int %u ndiv_frac %u p2div %u p1div %u fvco %u\n", ndiv_int, ndiv_frac, p2div, p1div, fvco));
+
+ FVCO = fvco;
+#endif /* BCMDBG */
+
+ /* Return ARM/SB clock */
+ return FVCO / m1div * 1000;
+}
+
+/* initialize PLL */
+void BCMATTACHFN(si_pmu_pll_init) (si_t * sih, osl_t * osh, uint xtalfreq) {
+ chipcregs_t *cc;
+ uint origidx;
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ if (xtalfreq == 0)
+ xtalfreq = 38400;
+ si_pmu1_pllinit0(sih, osh, cc, xtalfreq);
+ break;
+ case BCM4313_CHIP_ID:
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ /* ??? */
+ break;
+ case BCM4319_CHIP_ID:
+ case BCM4336_CHIP_ID:
+ case BCM4330_CHIP_ID:
+ si_pmu1_pllinit0(sih, osh, cc, xtalfreq);
+ break;
+ default:
+ PMU_MSG(("No PLL init done for chip %s rev %d pmurev %d\n",
+ bcm_chipname(sih->chip, chn, 8), sih->chiprev,
+ sih->pmurev));
+ break;
+ }
+
+#ifdef BCMDBG_FORCEHT
+ OR_REG(osh, &cc->clk_ctl_st, CCS_FORCEHT);
+#endif
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* query alp/xtal clock frequency */
+uint32 BCMINITFN(si_pmu_alp_clock) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 clock = ALP_CLOCK;
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ case BCM4716_CHIP_ID:
+ case BCM4748_CHIP_ID:
+ case BCM47162_CHIP_ID:
+ case BCM4313_CHIP_ID:
+ case BCM5357_CHIP_ID:
+		/* always 20 MHz */
+ clock = 20000 * 1000;
+ break;
+ case BCM4329_CHIP_ID:
+ case BCM4319_CHIP_ID:
+ case BCM4336_CHIP_ID:
+ case BCM4330_CHIP_ID:
+
+ clock = si_pmu1_alpclk0(sih, osh, cc);
+ break;
+ case BCM5356_CHIP_ID:
+		/* always 25 MHz */
+ clock = 25000 * 1000;
+ break;
+ default:
+ PMU_MSG(("No ALP clock specified "
+ "for chip %s rev %d pmurev %d, using default %d Hz\n",
+ bcm_chipname(sih->chip, chn, 8), sih->chiprev,
+ sih->pmurev, clock));
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return clock;
+}
+
+/* Find the output of the "m" pll divider given pll controls that start with
+ * pllreg "pll0", i.e. 12 for main, 6 for phy, 0 for misc.
+ */
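+/*
+ * The returned frequency works out to alp_clock * ndiv * p1 / p2, divided by
+ * the selected m divider; the math is done in MHz and scaled back to Hz.
+ */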
+static uint32
+BCMINITFN(si_pmu5_clock) (si_t * sih, osl_t * osh, chipcregs_t * cc, uint pll0,
+ uint m) {
+ uint32 tmp, div, ndiv, p1, p2, fc;
+
+ if ((pll0 & 3) || (pll0 > PMU4716_MAINPLL_PLL0)) {
+ PMU_ERROR(("%s: Bad pll0: %d\n", __func__, pll0));
+ return 0;
+ }
+
+ /* Strictly there is an m5 divider, but I'm not sure we use it */
+ if ((m == 0) || (m > 4)) {
+ PMU_ERROR(("%s: Bad m divider: %d\n", __func__, m));
+ return 0;
+ }
+
+ if (CHIPID(sih->chip) == BCM5357_CHIP_ID) {
+ /* Detect failure in clock setting */
+ if ((R_REG(osh, &cc->chipstatus) & 0x40000) != 0) {
+ return (133 * 1000000);
+ }
+ }
+
+ W_REG(osh, &cc->pllcontrol_addr, pll0 + PMU5_PLL_P1P2_OFF);
+ (void)R_REG(osh, &cc->pllcontrol_addr);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ p1 = (tmp & PMU5_PLL_P1_MASK) >> PMU5_PLL_P1_SHIFT;
+ p2 = (tmp & PMU5_PLL_P2_MASK) >> PMU5_PLL_P2_SHIFT;
+
+ W_REG(osh, &cc->pllcontrol_addr, pll0 + PMU5_PLL_M14_OFF);
+ (void)R_REG(osh, &cc->pllcontrol_addr);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ div = (tmp >> ((m - 1) * PMU5_PLL_MDIV_WIDTH)) & PMU5_PLL_MDIV_MASK;
+
+ W_REG(osh, &cc->pllcontrol_addr, pll0 + PMU5_PLL_NM5_OFF);
+ (void)R_REG(osh, &cc->pllcontrol_addr);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ ndiv = (tmp & PMU5_PLL_NDIV_MASK) >> PMU5_PLL_NDIV_SHIFT;
+
+ /* Do calculation in Mhz */
+ fc = si_pmu_alp_clock(sih, osh) / 1000000;
+ fc = (p1 * ndiv * fc) / p2;
+
+ PMU_NONE(("%s: p1=%d, p2=%d, ndiv=%d(0x%x), m%d=%d; fc=%d, clock=%d\n",
+ __func__, p1, p2, ndiv, ndiv, m, div, fc, fc / div));
+
+ /* Return clock in Hertz */
+ return ((fc / div) * 1000000);
+}
+
+/* query backplane clock frequency */
+/* For designs that feed the same clock to both backplane
+ * and CPU just return the CPU clock speed.
+ */
+uint32 BCMINITFN(si_pmu_si_clock) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 clock = HT_CLOCK;
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ /* 96MHz backplane clock */
+ clock = 96000 * 1000;
+ break;
+ case BCM4716_CHIP_ID:
+ case BCM4748_CHIP_ID:
+ case BCM47162_CHIP_ID:
+ clock =
+ si_pmu5_clock(sih, osh, cc, PMU4716_MAINPLL_PLL0,
+ PMU5_MAINPLL_SI);
+ break;
+ case BCM4329_CHIP_ID:
+ if (CHIPREV(sih->chiprev) == 0)
+ clock = 38400 * 1000;
+ else
+ clock = si_pmu1_cpuclk0(sih, osh, cc);
+ break;
+ case BCM4319_CHIP_ID:
+ case BCM4336_CHIP_ID:
+ case BCM4330_CHIP_ID:
+ clock = si_pmu1_cpuclk0(sih, osh, cc);
+ break;
+ case BCM4313_CHIP_ID:
+ /* 80MHz backplane clock */
+ clock = 80000 * 1000;
+ break;
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ clock =
+ (cc->chipstatus & CST43236_BP_CLK) ? (120000 *
+ 1000) : (96000 *
+ 1000);
+ break;
+ case BCM5356_CHIP_ID:
+ clock =
+ si_pmu5_clock(sih, osh, cc, PMU5356_MAINPLL_PLL0,
+ PMU5_MAINPLL_SI);
+ break;
+ case BCM5357_CHIP_ID:
+ clock =
+ si_pmu5_clock(sih, osh, cc, PMU5357_MAINPLL_PLL0,
+ PMU5_MAINPLL_SI);
+ break;
+ default:
+ PMU_MSG(("No backplane clock specified "
+ "for chip %s rev %d pmurev %d, using default %d Hz\n",
+ bcm_chipname(sih->chip, chn, 8), sih->chiprev,
+ sih->pmurev, clock));
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ return clock;
+}
+
+/* query CPU clock frequency */
+uint32 BCMINITFN(si_pmu_cpu_clock) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 clock;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ if ((sih->pmurev >= 5) &&
+ !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43236_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+ uint pll;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM5356_CHIP_ID:
+ pll = PMU5356_MAINPLL_PLL0;
+ break;
+ case BCM5357_CHIP_ID:
+ pll = PMU5357_MAINPLL_PLL0;
+ break;
+ default:
+ pll = PMU4716_MAINPLL_PLL0;
+ break;
+ }
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ clock = si_pmu5_clock(sih, osh, cc, pll, PMU5_MAINPLL_CPU);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ } else
+ clock = si_pmu_si_clock(sih, osh);
+
+ return clock;
+}
+
+/* query memory clock frequency */
+uint32 BCMINITFN(si_pmu_mem_clock) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 clock;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ if ((sih->pmurev >= 5) &&
+ !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4330_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43236_CHIP_ID))) {
+ uint pll;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM5356_CHIP_ID:
+ pll = PMU5356_MAINPLL_PLL0;
+ break;
+ case BCM5357_CHIP_ID:
+ pll = PMU5357_MAINPLL_PLL0;
+ break;
+ default:
+ pll = PMU4716_MAINPLL_PLL0;
+ break;
+ }
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ clock = si_pmu5_clock(sih, osh, cc, pll, PMU5_MAINPLL_MEM);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+ } else {
+ clock = si_pmu_si_clock(sih, osh);
+ }
+
+ return clock;
+}
+
+/* Measure ILP clock frequency */
+#define ILP_CALC_DUR 10 /* ms, make sure 1000 can be divided by it. */
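+/*
+ * The ILP frequency is measured once by counting pmutimer ticks over
+ * ILP_CALC_DUR ms and scaling to one second, e.g. an illustrative count of
+ * 328 ticks in 10 ms gives 328 * (1000 / 10) = 32800 cycles/sec.
+ */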
+
+static uint32 ilpcycles_per_sec = 0;
+
+uint32 BCMINITFN(si_pmu_ilp_clock) (si_t * sih, osl_t * osh) {
+ if (ISSIM_ENAB(sih))
+ return ILP_CLOCK;
+
+ if (ilpcycles_per_sec == 0) {
+ uint32 start, end, delta;
+ uint32 origidx = si_coreidx(sih);
+ chipcregs_t *cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+ start = R_REG(osh, &cc->pmutimer);
+ OSL_DELAY(ILP_CALC_DUR * 1000);
+ end = R_REG(osh, &cc->pmutimer);
+ delta = end - start;
+ ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
+ si_setcoreidx(sih, origidx);
+ }
+
+ return ilpcycles_per_sec;
+}
+
+/* SDIO Pad drive strength to select value mappings */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
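+
+/*
+ * Each table below is ordered by decreasing strength; the init routine picks
+ * the first entry whose strength does not exceed the requested value.
+ */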
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t BCMINITDATA(sdiod_drive_strength_tab1)[] =
+{
+ {
+ 4, 0x2}, {
+ 2, 0x3}, {
+ 1, 0x0}, {
+0, 0x0}};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t BCMINITDATA(sdiod_drive_strength_tab2)[] =
+{
+ {
+ 12, 0x7}, {
+ 10, 0x6}, {
+ 8, 0x5}, {
+ 6, 0x4}, {
+ 4, 0x2}, {
+ 2, 0x1}, {
+0, 0x0}};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t BCMINITDATA(sdiod_drive_strength_tab3)[] =
+{
+ {
+ 32, 0x7}, {
+ 26, 0x6}, {
+ 22, 0x5}, {
+ 16, 0x4}, {
+ 12, 0x3}, {
+ 8, 0x2}, {
+ 4, 0x1}, {
+0, 0x0}};
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+void
+BCMINITFN(si_sdiod_drive_strength_init) (si_t * sih, osl_t * osh,
+ uint32 drivestrength) {
+ chipcregs_t *cc;
+ uint origidx, intr_val = 0;
+ sdiod_drive_str_t *str_tab = NULL;
+ uint32 str_mask = 0;
+ uint32 str_shift = 0;
+#ifdef BCMDBG
+ char chn[8];
+#endif
+
+ if (!(sih->cccaps & CC_CAP_PMU)) {
+ return;
+ }
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx,
+ &intr_val);
+
+ switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ str_tab = (sdiod_drive_str_t *) & sdiod_drive_strength_tab3;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+
+ default:
+ PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+
+ break;
+ }
+
+ if (str_tab != NULL) {
+ uint32 drivestrength_sel = 0;
+ uint32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+
+ PMU_MSG(("SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp));
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+}
+
+/* initialize PMU */
+void BCMATTACHFN(si_pmu_init) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ if (sih->pmurev == 1)
+ AND_REG(osh, &cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
+ else if (sih->pmurev >= 2)
+ OR_REG(osh, &cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
+ /* Fix for 4329b0 bad LPOM state. */
+ W_REG(osh, &cc->regcontrol_addr, 2);
+ OR_REG(osh, &cc->regcontrol_data, 0x100);
+
+ W_REG(osh, &cc->regcontrol_addr, 3);
+ OR_REG(osh, &cc->regcontrol_data, 0x4);
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* Return up time in ILP cycles for the given resource. */
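+/*
+ * The total is the resource's own up time plus the worst-case up time of its
+ * direct dependencies (dependencies already kept on by the minimum resource
+ * mask are excluded), plus the PMURES_UP_TRANSITION allowance.
+ */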
+static uint
+BCMINITFN(si_pmu_res_uptime) (si_t * sih, osl_t * osh, chipcregs_t * cc,
+ uint8 rsrc) {
+ uint32 deps;
+ uint up, i, dup, dmax;
+ uint32 min_mask = 0, max_mask = 0;
+
+ /* uptime of resource 'rsrc' */
+ W_REG(osh, &cc->res_table_sel, rsrc);
+ up = (R_REG(osh, &cc->res_updn_timer) >> 8) & 0xff;
+
+	/* direct dependencies of resource 'rsrc' */
+ deps = si_pmu_res_deps(sih, osh, cc, PMURES_BIT(rsrc), FALSE);
+ for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
+ if (!(deps & PMURES_BIT(i)))
+ continue;
+ deps &= ~si_pmu_res_deps(sih, osh, cc, PMURES_BIT(i), TRUE);
+ }
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+ deps &= ~min_mask;
+
+	/* max uptime of direct dependencies */
+ dmax = 0;
+ for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
+ if (!(deps & PMURES_BIT(i)))
+ continue;
+ dup = si_pmu_res_uptime(sih, osh, cc, (uint8) i);
+ if (dmax < dup)
+ dmax = dup;
+ }
+
+ PMU_MSG(("si_pmu_res_uptime: rsrc %u uptime %u(deps 0x%08x uptime %u)\n", rsrc, up, deps, dmax));
+
+ return up + dmax + PMURES_UP_TRANSITION;
+}
+
+/* Return dependencies (direct or all/indirect) for the given resources */
+static uint32
+si_pmu_res_deps(si_t * sih, osl_t * osh, chipcregs_t * cc, uint32 rsrcs,
+ bool all)
+{
+ uint32 deps = 0;
+ uint32 i;
+
+ for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
+ if (!(rsrcs & PMURES_BIT(i)))
+ continue;
+ W_REG(osh, &cc->res_table_sel, i);
+ deps |= R_REG(osh, &cc->res_dep_mask);
+ }
+
+ return !all ? deps : (deps
+ ? (deps |
+ si_pmu_res_deps(sih, osh, cc, deps,
+ TRUE)) : 0);
+}
+
+/* power up/down OTP through PMU resources */
+void si_pmu_otp_power(si_t * sih, osl_t * osh, bool on)
+{
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 rsrcs = 0; /* rsrcs to turn on/off OTP power */
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Don't do anything if OTP is disabled */
+ if (si_is_otp_disabled(sih)) {
+ PMU_MSG(("si_pmu_otp_power: OTP is disabled\n"));
+ return;
+ }
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ rsrcs = PMURES_BIT(RES4329_OTP_PU);
+ break;
+ case BCM4319_CHIP_ID:
+ rsrcs = PMURES_BIT(RES4319_OTP_PU);
+ break;
+ case BCM4336_CHIP_ID:
+ rsrcs = PMURES_BIT(RES4336_OTP_PU);
+ break;
+ case BCM4330_CHIP_ID:
+ rsrcs = PMURES_BIT(RES4330_OTP_PU);
+ break;
+ default:
+ break;
+ }
+
+ if (rsrcs != 0) {
+ uint32 otps;
+
+		/* Figure out the dependencies (exclude min_res_mask) */
+ uint32 deps = si_pmu_res_deps(sih, osh, cc, rsrcs, TRUE);
+ uint32 min_mask = 0, max_mask = 0;
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+ deps &= ~min_mask;
+ /* Turn on/off the power */
+ if (on) {
+ PMU_MSG(("Adding rsrc 0x%x to min_res_mask\n",
+ rsrcs | deps));
+ OR_REG(osh, &cc->min_res_mask, (rsrcs | deps));
+ SPINWAIT(!(R_REG(osh, &cc->res_state) & rsrcs),
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT(R_REG(osh, &cc->res_state) & rsrcs);
+ } else {
+ PMU_MSG(("Removing rsrc 0x%x from min_res_mask\n",
+ rsrcs | deps));
+ AND_REG(osh, &cc->min_res_mask, ~(rsrcs | deps));
+ }
+
+ SPINWAIT((((otps = R_REG(osh, &cc->otpstatus)) & OTPS_READY) !=
+ (on ? OTPS_READY : 0)), 100);
+ ASSERT((otps & OTPS_READY) == (on ? OTPS_READY : 0));
+ if ((otps & OTPS_READY) != (on ? OTPS_READY : 0))
+ PMU_MSG(("OTP ready bit not %s after wait\n",
+ (on ? "ON" : "OFF")));
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void si_pmu_rcal(si_t * sih, osl_t * osh)
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:{
+ uint8 rcal_code;
+ uint32 val;
+
+ /* Kick RCal */
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+
+ /* Power Down RCAL Block */
+ AND_REG(osh, &cc->chipcontrol_data, ~0x04);
+
+ /* Power Up RCAL block */
+ OR_REG(osh, &cc->chipcontrol_data, 0x04);
+
+ /* Wait for completion */
+ SPINWAIT(0 == (R_REG(osh, &cc->chipstatus) & 0x08),
+ 10 * 1000 * 1000);
+ ASSERT(R_REG(osh, &cc->chipstatus) & 0x08);
+
+ /* Drop the LSB to convert from 5 bit code to 4 bit code */
+ rcal_code =
+ (uint8) (R_REG(osh, &cc->chipstatus) >> 5) & 0x0f;
+
+ PMU_MSG(("RCal completed, status 0x%x, code 0x%x\n",
+ R_REG(osh, &cc->chipstatus), rcal_code));
+
+ /* Write RCal code into pmu_vreg_ctrl[32:29] */
+ W_REG(osh, &cc->regcontrol_addr, 0);
+ val =
+ R_REG(osh,
+ &cc->
+ regcontrol_data) & ~((uint32) 0x07 << 29);
+ val |= (uint32) (rcal_code & 0x07) << 29;
+ W_REG(osh, &cc->regcontrol_data, val);
+ W_REG(osh, &cc->regcontrol_addr, 1);
+ val = R_REG(osh, &cc->regcontrol_data) & ~(uint32) 0x01;
+ val |= (uint32) ((rcal_code >> 3) & 0x01);
+ W_REG(osh, &cc->regcontrol_data, val);
+
+ /* Write RCal code into pmu_chip_ctrl[33:30] */
+ W_REG(osh, &cc->chipcontrol_addr, 0);
+ val =
+ R_REG(osh,
+ &cc->
+ chipcontrol_data) & ~((uint32) 0x03 << 30);
+ val |= (uint32) (rcal_code & 0x03) << 30;
+ W_REG(osh, &cc->chipcontrol_data, val);
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ val =
+ R_REG(osh, &cc->chipcontrol_data) & ~(uint32) 0x03;
+ val |= (uint32) ((rcal_code >> 2) & 0x03);
+ W_REG(osh, &cc->chipcontrol_data, val);
+
+ /* Set override in pmu_chip_ctrl[29] */
+ W_REG(osh, &cc->chipcontrol_addr, 0);
+ OR_REG(osh, &cc->chipcontrol_data, (0x01 << 29));
+
+ /* Power off RCal block */
+ W_REG(osh, &cc->chipcontrol_addr, 1);
+ AND_REG(osh, &cc->chipcontrol_data, ~0x04);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+void si_pmu_spuravoid(si_t * sih, osl_t * osh, uint8 spuravoid)
+{
+ chipcregs_t *cc;
+ uint origidx, intr_val;
+ uint32 tmp = 0;
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx,
+ &intr_val);
+ ASSERT(cc != NULL);
+
+ /* force the HT off */
+ if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+ tmp = R_REG(osh, &cc->max_res_mask);
+ tmp &= ~RES4336_HT_AVAIL;
+ W_REG(osh, &cc->max_res_mask, tmp);
+ /* wait for the ht to really go away */
+ SPINWAIT(((R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL) == 0),
+ 10000);
+ ASSERT((R_REG(osh, &cc->clk_ctl_st) & CCS_HTAVAIL) == 0);
+ }
+
+ /* update the pll changes */
+ si_pmu_spuravoid_pllupdate(sih, cc, osh, spuravoid);
+
+ /* enable HT back on */
+ if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+ tmp = R_REG(osh, &cc->max_res_mask);
+ tmp |= RES4336_HT_AVAIL;
+ W_REG(osh, &cc->max_res_mask, tmp);
+ }
+
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+}
+
+static void
+si_pmu_spuravoid_pllupdate(si_t * sih, chipcregs_t * cc, osl_t * osh,
+ uint8 spuravoid)
+{
+ uint32 tmp = 0;
+ uint8 phypll_offset = 0;
+ uint8 bcm5357_bcm43236_p1div[] = { 0x1, 0x5, 0x5 };
+ uint8 bcm5357_bcm43236_ndiv[] = { 0x30, 0xf6, 0xfc };
+
+ switch (CHIPID(sih->chip)) {
+ case BCM5357_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+
+ /* BCM5357 needs to touch PLL1_PLLCTL[02], so offset PLL0_PLLCTL[02] by 6 */
+ phypll_offset = (CHIPID(sih->chip) == BCM5357_CHIP_ID) ? 6 : 0;
+
+ /* RMW only the P1 divider */
+ W_REG(osh, &cc->pllcontrol_addr,
+ PMU1_PLL0_PLLCTL0 + phypll_offset);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ tmp &= (~(PMU1_PLL0_PC0_P1DIV_MASK));
+ tmp |=
+ (bcm5357_bcm43236_p1div[spuravoid] <<
+ PMU1_PLL0_PC0_P1DIV_SHIFT);
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+
+ /* RMW only the int feedback divider */
+ W_REG(osh, &cc->pllcontrol_addr,
+ PMU1_PLL0_PLLCTL2 + phypll_offset);
+ tmp = R_REG(osh, &cc->pllcontrol_data);
+ tmp &= ~(PMU1_PLL0_PC2_NDIV_INT_MASK);
+ tmp |=
+ (bcm5357_bcm43236_ndiv[spuravoid]) <<
+ PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+ W_REG(osh, &cc->pllcontrol_data, tmp);
+
+ tmp = 1 << 10;
+ break;
+
+ case BCM4331_CHIP_ID:
+ if (spuravoid == 2) {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11500014);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x0FC00a08);
+ } else if (spuravoid == 1) {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11500014);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x0F600a08);
+ } else {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11100014);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x03000a08);
+ }
+ tmp = 1 << 10;
+ break;
+
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM6362_CHIP_ID:
+ if (spuravoid == 1) {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11500010);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x000C0C06);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x0F600a08);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(osh, &cc->pllcontrol_data, 0x00000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x2001E920);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888815);
+ } else {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11100010);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x000c0c06);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x03000a08);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(osh, &cc->pllcontrol_data, 0x00000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x200005c0);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888815);
+ }
+ tmp = 1 << 10;
+ break;
+
+		/* NOTE: unreachable - this block follows the break above
+		 * without a case label of its own
+		 */
+		W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11100008);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x0c000c06);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x03000a08);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(osh, &cc->pllcontrol_data, 0x00000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x200005c0);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888855);
+
+ tmp = 1 << 10;
+ break;
+
+ case BCM4716_CHIP_ID:
+ case BCM4748_CHIP_ID:
+ case BCM47162_CHIP_ID:
+ if (spuravoid == 1) {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11500060);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x080C0C06);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x0F600000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(osh, &cc->pllcontrol_data, 0x00000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x2001E924);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888815);
+ } else {
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11100060);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x080c0c06);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x03000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(osh, &cc->pllcontrol_data, 0x00000000);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x200005c0);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888815);
+ }
+
+ tmp = 3 << 9;
+ break;
+
+ case BCM4319_CHIP_ID:
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x11100070);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x1014140a);
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888854);
+
+ if (spuravoid == 1) { /* spur_avoid ON, enable 41/82/164Mhz clock mode */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x05201828);
+ } else { /* enable 40/80/160Mhz clock mode */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x05001828);
+ }
+ break;
+ case BCM4336_CHIP_ID:
+ /* Looks like these are only for default xtal freq 26MHz */
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(osh, &cc->pllcontrol_data, 0x02100020);
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(osh, &cc->pllcontrol_data, 0x0C0C0C0C);
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(osh, &cc->pllcontrol_data, 0x01240C0C);
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(osh, &cc->pllcontrol_data, 0x202C2820);
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(osh, &cc->pllcontrol_data, 0x88888825);
+
+ W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ if (spuravoid == 1) {
+ W_REG(osh, &cc->pllcontrol_data, 0x00EC4EC4);
+ } else {
+ W_REG(osh, &cc->pllcontrol_data, 0x00762762);
+ }
+
+ tmp = PCTL_PLL_PLLCTL_UPD;
+ break;
+
+ default:
+ PMU_ERROR(("%s: unknown spuravoidance settings for chip %s, not changing PLL\n", __func__, bcm_chipname(sih->chip, chn, 8)));
+ break;
+ }
+
+ tmp |= R_REG(osh, &cc->pmucontrol);
+ W_REG(osh, &cc->pmucontrol, tmp);
+}
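All of the PLL writes above go through the pllcontrol_addr/pllcontrol_data pair rather than directly memory-mapped registers. A minimal sketch of that read-modify-write idiom (the helper name pll_rmw is illustrative and not part of the driver):

/* Sketch: read-modify-write one indirect PLL control register.
 * Select the register through pllcontrol_addr, then operate on
 * pllcontrol_data, exactly as the per-chip cases above do inline.
 */
static uint32 pll_rmw(osl_t *osh, chipcregs_t *cc, uint reg,
		      uint32 mask, uint32 val)
{
	uint32 tmp;

	W_REG(osh, &cc->pllcontrol_addr, reg);
	tmp = R_REG(osh, &cc->pllcontrol_data);
	tmp = (tmp & ~mask) | (val & mask);
	W_REG(osh, &cc->pllcontrol_data, tmp);
	return tmp;
}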
+
+bool si_pmu_is_otp_powered(si_t * sih, osl_t * osh)
+{
+ uint idx;
+ chipcregs_t *cc;
+ bool st;
+
+ /* Remember original core before switch to chipc */
+ idx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4329_OTP_PU))
+ != 0;
+ break;
+ case BCM4319_CHIP_ID:
+ st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4319_OTP_PU))
+ != 0;
+ break;
+ case BCM4336_CHIP_ID:
+ st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4336_OTP_PU))
+ != 0;
+ break;
+ case BCM4330_CHIP_ID:
+ st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4330_OTP_PU))
+ != 0;
+ break;
+
+	/* These chips don't use a PMU bit to power the OTP up/down; OTP is always on.
+ * Use OTP_INIT command to reset/refresh state.
+ */
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ st = TRUE;
+ break;
+ default:
+ st = TRUE;
+ break;
+ }
+
+ /* Return to original core */
+ si_setcoreidx(sih, idx);
+ return st;
+}
+
+void
+#if defined(BCMDBG)
+si_pmu_sprom_enable(si_t * sih, osl_t * osh, bool enable)
+#else
+BCMATTACHFN(si_pmu_sprom_enable) (si_t * sih, osl_t * osh, bool enable)
+#endif
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* initialize PMU chip controls and other chip level stuff */
+void BCMATTACHFN(si_pmu_chip_init) (si_t * sih, osl_t * osh) {
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+#ifdef CHIPC_UART_ALWAYS_ON
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st),
+ CCS_FORCEALP, CCS_FORCEALP);
+#endif /* CHIPC_UART_ALWAYS_ON */
+
+ /* Gate off SPROM clock and chip select signals */
+ si_pmu_sprom_enable(sih, osh, FALSE);
+
+ /* Remember original core */
+ origidx = si_coreidx(sih);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+}
+
+/* initialize PMU switch/regulators */
+void BCMATTACHFN(si_pmu_swreg_init) (si_t * sih, osl_t * osh) {
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4336_CHIP_ID:
+ /* Reduce CLDO PWM output voltage to 1.2V */
+ si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_CLDO_PWM, 0xe);
+ /* Reduce CLDO BURST output voltage to 1.2V */
+ si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_CLDO_BURST,
+ 0xe);
+ /* Reduce LNLDO1 output voltage to 1.2V */
+ si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_LNLDO1, 0xe);
+ if (CHIPREV(sih->chiprev) == 0)
+ si_pmu_regcontrol(sih, 2, 0x400000, 0x400000);
+ break;
+
+ case BCM4330_CHIP_ID:
+ /* CBUCK Voltage is 1.8 by default and set that to 1.5 */
+ si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_CBUCK_PWM, 0);
+ break;
+ default:
+ break;
+ }
+}
+
+void si_pmu_radio_enable(si_t * sih, bool enable)
+{
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4319_CHIP_ID:
+ if (enable)
+ si_write_wrapperreg(sih, AI_OOBSELOUTB74,
+ (uint32) 0x868584);
+ else
+ si_write_wrapperreg(sih, AI_OOBSELOUTB74,
+ (uint32) 0x060584);
+ break;
+ }
+}
+
+/* Wait for a particular clock level to be on the backplane */
+uint32
+si_pmu_waitforclk_on_backplane(si_t * sih, osl_t * osh, uint32 clk,
+ uint32 delay)
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ if (delay)
+ SPINWAIT(((R_REG(osh, &cc->pmustatus) & clk) != clk), delay);
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return (R_REG(osh, &cc->pmustatus) & clk);
+}
+
+/*
+ * Measures the ALP clock frequency in KHz. Returns 0 if not possible.
+ * Possible only if PMU rev >= 10 and there is an external LPO 32768Hz crystal.
+ */
+
+#define EXT_ILP_HZ 32768
+
+uint32 BCMATTACHFN(si_pmu_measure_alpclk) (si_t * sih, osl_t * osh) {
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 alp_khz;
+
+ if (sih->pmurev < 10)
+ return 0;
+
+ ASSERT(sih->cccaps & CC_CAP_PMU);
+
+ /* Remember original core before switch to chipc */
+ origidx = si_coreidx(sih);
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT(cc != NULL);
+
+ if (R_REG(osh, &cc->pmustatus) & PST_EXTLPOAVAIL) {
+ uint32 ilp_ctr, alp_hz;
+
+ /* Enable the reg to measure the freq, in case disabled before */
+ W_REG(osh, &cc->pmu_xtalfreq,
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+
+ /* Delay for well over 4 ILP clocks */
+ OSL_DELAY(1000);
+
+ /* Read the latched number of ALP ticks per 4 ILP ticks */
+ ilp_ctr =
+ R_REG(osh,
+ &cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+
+ /* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT bit to save power */
+ W_REG(osh, &cc->pmu_xtalfreq, 0);
+
+ /* Calculate ALP frequency */
+ alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
+
+ /* Round to nearest 100KHz, and at the same time convert to KHz */
+ alp_khz = (alp_hz + 50000) / 100000 * 100;
+ } else
+ alp_khz = 0;
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
+
+ return alp_khz;
+}
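A worked example of the conversion above, with an illustrative (not measured) counter value for a 20 MHz ALP clock:

/* Illustrative only: 2441 ALP edges latched over 4 ILP (32768 Hz) ticks */
uint32 ilp_ctr = 2441;
uint32 alp_hz  = (ilp_ctr * EXT_ILP_HZ) / 4;      /* 19,996,672 Hz */
uint32 alp_khz = (alp_hz + 50000) / 100000 * 100; /* rounds to 20,000 kHz */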
+
+static void BCMATTACHFN(si_pmu_set_4330_plldivs) (si_t * sih) {
+ uint32 FVCO = si_pmu1_pllfvco0(sih) / 1000;
+ uint32 m1div, m2div, m3div, m4div, m5div, m6div;
+ uint32 pllc1, pllc2;
+
+ m2div = m3div = m4div = m6div = FVCO / 80;
+ m5div = FVCO / 160;
+
+ if (CST4330_CHIPMODE_SDIOD(sih->chipst))
+ m1div = FVCO / 80;
+ else
+ m1div = FVCO / 90;
+ pllc1 =
+ (m1div << PMU1_PLL0_PC1_M1DIV_SHIFT) | (m2div <<
+ PMU1_PLL0_PC1_M2DIV_SHIFT) |
+ (m3div << PMU1_PLL0_PC1_M3DIV_SHIFT) | (m4div <<
+ PMU1_PLL0_PC1_M4DIV_SHIFT);
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, ~0, pllc1);
+
+	pllc2 = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL2, 0, 0);
+ pllc2 &= ~(PMU1_PLL0_PC2_M5DIV_MASK | PMU1_PLL0_PC2_M6DIV_MASK);
+ pllc2 |=
+ ((m5div << PMU1_PLL0_PC2_M5DIV_SHIFT) |
+ (m6div << PMU1_PLL0_PC2_M6DIV_SHIFT));
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL2, ~0, pllc2);
+}
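To make the divider arithmetic concrete, assume the VCO runs at 960 MHz (an assumed value for illustration; si_pmu1_pllfvco0() supplies the real one). The integer divides then give:

/* Assumed FVCO = 960 (MHz) purely for illustration */
uint32 FVCO  = 960;
uint32 m2div = FVCO / 80;   /* 12: also used for m3div, m4div, m6div */
uint32 m5div = FVCO / 160;  /*  6 */
uint32 m1div = FVCO / 90;   /* 10 when not strapped for SDIO; FVCO / 80 = 12 when it is */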
diff --git a/drivers/staging/brcm80211/util/linux_osl.c b/drivers/staging/brcm80211/util/linux_osl.c
new file mode 100644
index 000000000000..a1898bb58633
--- /dev/null
+++ b/drivers/staging/brcm80211/util/linux_osl.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <linux/delay.h>
+#ifdef mips
+#include <asm/paccess.h>
+#endif /* mips */
+#include <pcicfg.h>
+
+#include <linux/fs.h>
+
+#define PCI_CFG_RETRY 10
+
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognise osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_info {
+ osl_pubinfo_t pub;
+ uint magic;
+ void *pdev;
+ uint malloced;
+ uint failed;
+ uint bustype;
+ bcm_mem_link_t *dbgmem_list;
+};
+
+/* Global ASSERT type flag */
+uint32 g_assert_type = 0;
+
+static int16 linuxbcmerrormap[] = { 0, /* 0 */
+ -EINVAL, /* BCME_ERROR */
+ -EINVAL, /* BCME_BADARG */
+ -EINVAL, /* BCME_BADOPTION */
+ -EINVAL, /* BCME_NOTUP */
+ -EINVAL, /* BCME_NOTDOWN */
+ -EINVAL, /* BCME_NOTAP */
+ -EINVAL, /* BCME_NOTSTA */
+ -EINVAL, /* BCME_BADKEYIDX */
+ -EINVAL, /* BCME_RADIOOFF */
+ -EINVAL, /* BCME_NOTBANDLOCKED */
+ -EINVAL, /* BCME_NOCLK */
+ -EINVAL, /* BCME_BADRATESET */
+ -EINVAL, /* BCME_BADBAND */
+ -E2BIG, /* BCME_BUFTOOSHORT */
+ -E2BIG, /* BCME_BUFTOOLONG */
+ -EBUSY, /* BCME_BUSY */
+ -EINVAL, /* BCME_NOTASSOCIATED */
+ -EINVAL, /* BCME_BADSSIDLEN */
+ -EINVAL, /* BCME_OUTOFRANGECHAN */
+ -EINVAL, /* BCME_BADCHAN */
+ -EFAULT, /* BCME_BADADDR */
+ -ENOMEM, /* BCME_NORESOURCE */
+ -EOPNOTSUPP, /* BCME_UNSUPPORTED */
+ -EMSGSIZE, /* BCME_BADLENGTH */
+ -EINVAL, /* BCME_NOTREADY */
+ -EPERM, /* BCME_NOTPERMITTED */
+ -ENOMEM, /* BCME_NOMEM */
+ -EINVAL, /* BCME_ASSOCIATED */
+ -ERANGE, /* BCME_RANGE */
+ -EINVAL, /* BCME_NOTFOUND */
+ -EINVAL, /* BCME_WME_NOT_ENABLED */
+ -EINVAL, /* BCME_TSPEC_NOTFOUND */
+ -EINVAL, /* BCME_ACM_NOTSUPPORTED */
+ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
+ -EIO, /* BCME_SDIO_ERROR */
+ -ENODEV, /* BCME_DONGLE_DOWN */
+ -EINVAL, /* BCME_VERSION */
+ -EIO, /* BCME_TXFAIL */
+ -EIO, /* BCME_RXFAIL */
+ -EINVAL, /* BCME_NODEVICE */
+ -EINVAL, /* BCME_NMODE_DISABLED */
+ -ENODATA, /* BCME_NONRESIDENT */
+
+/* When a new error code is added to bcmutils.h, add the OS-
+ * specific error translation here as well
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -42
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+ for new error code defined in bcmutils.h"
+#endif
+};
+
+/* translate bcmerrors into linux errors */
+int osl_error(int bcmerror)
+{
+ if (bcmerror > 0)
+ bcmerror = 0;
+ else if (bcmerror < BCME_LAST)
+ bcmerror = BCME_ERROR;
+
+ /* Array bounds covered by ASSERT in osl_attach */
+ return linuxbcmerrormap[-bcmerror];
+}
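Two illustrative lookups through the table (the constants are the BCME_* codes from bcmutils.h):

int err = osl_error(BCME_NOMEM);  /* maps to -ENOMEM */
int ok  = osl_error(BCME_OK);     /* success (0) stays 0 */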
+
+osl_t *osl_attach(void *pdev, uint bustype, bool pkttag)
+{
+ osl_t *osh;
+
+ osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+ /* Check that error map has the right number of entries in it */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+ osh->magic = OS_HANDLE_MAGIC;
+ osh->malloced = 0;
+ osh->failed = 0;
+ osh->dbgmem_list = NULL;
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+ osh->bustype = bustype;
+
+ switch (bustype) {
+ case PCI_BUS:
+ case SI_BUS:
+ osh->pub.mmbus = TRUE;
+ break;
+ case JTAG_BUS:
+ case SDIO_BUS:
+ case USB_BUS:
+ case SPI_BUS:
+ case RPC_BUS:
+ osh->pub.mmbus = FALSE;
+ break;
+ default:
+ ASSERT(FALSE);
+ break;
+ }
+
+#ifdef BCMDBG
+ if (pkttag) {
+ struct sk_buff *skb;
+ ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
+ }
+#endif
+ return osh;
+}
+
+void osl_detach(osl_t * osh)
+{
+ if (osh == NULL)
+ return;
+
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+}
+
+/* Return a new packet. zero out pkttag */
+void *BCMFASTPATH osl_pktget(osl_t * osh, uint len)
+{
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(len))) {
+ skb_put(skb, len);
+ skb->priority = 0;
+
+ osh->pub.pktalloced++;
+ }
+
+ return ((void *)skb);
+}
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH osl_pktfree(osl_t * osh, void *p, bool send)
+{
+ struct sk_buff *skb, *nskb;
+ int nest = 0;
+
+ skb = (struct sk_buff *)p;
+ ASSERT(skb);
+
+ if (send && osh->pub.tx_fn)
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
+
+ osh->pub.pktalloced--;
+ nest++;
+ skb = nskb;
+ }
+}
+
+uint32 osl_pci_read_config(osl_t * osh, uint offset, uint size)
+{
+ uint val = 0;
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ /* only 4byte access supported */
+ ASSERT(size == 4);
+
+ do {
+ pci_read_config_dword(osh->pdev, offset, &val);
+ if (val != 0xffffffff)
+ break;
+ } while (retry--);
+
+#ifdef BCMDBG
+ if (retry < PCI_CFG_RETRY)
+ printk("PCI CONFIG READ access to %d required %d retries\n",
+ offset, (PCI_CFG_RETRY - retry));
+#endif /* BCMDBG */
+
+ return (val);
+}
+
+void osl_pci_write_config(osl_t * osh, uint offset, uint size, uint val)
+{
+ uint retry = PCI_CFG_RETRY;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ /* only 4byte access supported */
+ ASSERT(size == 4);
+
+ do {
+ pci_write_config_dword(osh->pdev, offset, val);
+ if (offset != PCI_BAR0_WIN)
+ break;
+ if (osl_pci_read_config(osh, offset, size) == val)
+ break;
+ } while (retry--);
+
+#ifdef BCMDBG
+ if (retry < PCI_CFG_RETRY)
+ printk("PCI CONFIG WRITE access to %d required %d retries\n",
+ offset, (PCI_CFG_RETRY - retry));
+#endif /* BCMDBG */
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint osl_pci_bus(osl_t * osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+
+/* return slot # for the pci device pointed by osh->pdev */
+uint osl_pci_slot(osl_t * osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
+}
+
+static void
+osl_pcmcia_attr(osl_t * osh, uint offset, char *buf, int size, bool write)
+{
+}
+
+void osl_pcmcia_read_attr(osl_t * osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *)buf, size, FALSE);
+}
+
+void osl_pcmcia_write_attr(osl_t * osh, uint offset, void *buf, int size)
+{
+ osl_pcmcia_attr(osh, offset, (char *)buf, size, TRUE);
+}
+
+void *osl_malloc(osl_t * osh, uint size)
+{
+ void *addr;
+
+ /* only ASSERT if osh is defined */
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+ if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh)
+ osh->malloced += size;
+
+ return (addr);
+}
+
+void osl_mfree(osl_t * osh, void *addr, uint size)
+{
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ osh->malloced -= size;
+ }
+ kfree(addr);
+}
+
+uint osl_malloced(osl_t * osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->malloced);
+}
+
+uint osl_malloc_failed(osl_t * osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+uint osl_dma_consistent_align(void)
+{
+ return (PAGE_SIZE);
+}
+
+void *osl_dma_alloc_consistent(osl_t * osh, uint size, uint16 align_bits,
+ uint * alloced, ulong * pap)
+{
+ uint16 align = (1 << align_bits);
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
+ size += align;
+ *alloced = size;
+
+ return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t *) pap));
+}
+
+void osl_dma_free_consistent(osl_t * osh, void *va, uint size, ulong pa)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t) pa);
+}
+
+uint BCMFASTPATH osl_dma_map(osl_t * osh, void *va, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+ return (pci_map_single(osh->pdev, va, size, dir));
+}
+
+void BCMFASTPATH osl_dma_unmap(osl_t * osh, uint pa, uint size, int direction)
+{
+ int dir;
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
+ pci_unmap_single(osh->pdev, (uint32) pa, size, dir);
+}
+
+#if defined(BCMDBG_ASSERT)
+void osl_assert(char *exp, char *file, int line)
+{
+ char tempbuf[256];
+ char *basename;
+
+ basename = strrchr(file, '/');
+ /* skip the '/' */
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+#ifdef BCMDBG_ASSERT
+ snprintf(tempbuf, 256,
+ "assertion \"%s\" failed: file \"%s\", line %d\n", exp,
+ basename, line);
+
+ /* Print assert message and give it time to be written to /var/log/messages */
+ if (!in_interrupt()) {
+ const int delay = 3;
+ printk("%s", tempbuf);
+ printk("panic in %d seconds\n", delay);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay * HZ);
+ }
+
+ switch (g_assert_type) {
+ case 0:
+ panic("%s", tempbuf);
+ break;
+ case 1:
+ printk("%s", tempbuf);
+ BUG();
+ break;
+ case 2:
+ printk("%s", tempbuf);
+ break;
+ default:
+ break;
+ }
+#endif /* BCMDBG_ASSERT */
+
+}
+#endif /* defined(BCMDBG_ASSERT) */
+
+void osl_delay(uint usec)
+{
+ uint d;
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+void *osl_pktdup(osl_t * osh, void *skb)
+{
+ void *p;
+
+ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+ return NULL;
+
+ /* skb_clone copies skb->cb.. we don't want that */
+ if (osh->pub.pkttag)
+ bzero((void *)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+ /* Increment the packet counter */
+ osh->pub.pktalloced++;
+ return (p);
+}
+
+#ifdef BCMSDIO
+uint8 osl_readb(osl_t * osh, volatile uint8 * r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ return (uint8) ((rreg) (ctx, (void *)r, sizeof(uint8)));
+}
+
+uint16 osl_readw(osl_t * osh, volatile uint16 * r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ return (uint16) ((rreg) (ctx, (void *)r, sizeof(uint16)));
+}
+
+uint32 osl_readl(osl_t * osh, volatile uint32 * r)
+{
+ osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ return (uint32) ((rreg) (ctx, (void *)r, sizeof(uint32)));
+}
+
+void osl_writeb(osl_t * osh, volatile uint8 * r, uint8 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ ((wreg) (ctx, (void *)r, v, sizeof(uint8)));
+}
+
+void osl_writew(osl_t * osh, volatile uint16 * r, uint16 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ ((wreg) (ctx, (void *)r, v, sizeof(uint16)));
+}
+
+void osl_writel(osl_t * osh, volatile uint32 * r, uint32 v)
+{
+ osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
+ void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
+
+ ((wreg) (ctx, (void *)r, v, sizeof(uint32)));
+}
+#endif /* BCMSDIO */
+/* Linux Kernel: File Operations: end */
diff --git a/drivers/staging/brcm80211/util/nicpci.c b/drivers/staging/brcm80211/util/nicpci.c
new file mode 100644
index 000000000000..feaa54ffa35b
--- /dev/null
+++ b/drivers/staging/brcm80211/util/nicpci.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <bcmdevs.h>
+#include <sbchipc.h>
+#include <pci_core.h>
+#include <pcie_core.h>
+#include <nicpci.h>
+#include <pcicfg.h>
+
+typedef struct {
+ union {
+ sbpcieregs_t *pcieregs;
+ sbpciregs_t *pciregs;
+ } regs; /* Memory mapped register to the core */
+
+ si_t *sih; /* System interconnect handle */
+ osl_t *osh; /* OSL handle */
+ uint8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
+ bool pcie_pr42767;
+ uint8 pcie_polarity;
+ uint8 pcie_war_aspm_ovr; /* Override ASPM/Clkreq settings */
+
+ uint8 pmecap_offset; /* PM Capability offset in the config space */
+ bool pmecap; /* Capable of generating PME */
+} pcicore_info_t;
+
+/* debug/trace */
+#define PCI_ERROR(args)
+#define PCIE_PUB(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
+
+/* routines to access mdio slave device registers */
+static bool pcie_mdiosetblock(pcicore_info_t * pi, uint blk);
+static int pcie_mdioop(pcicore_info_t * pi, uint physmedia, uint regaddr,
+ bool write, uint * val);
+static int pcie_mdiowrite(pcicore_info_t * pi, uint physmedia, uint readdr,
+ uint val);
+static int pcie_mdioread(pcicore_info_t * pi, uint physmedia, uint readdr,
+ uint * ret_val);
+
+static void pcie_extendL1timer(pcicore_info_t * pi, bool extend);
+static void pcie_clkreq_upd(pcicore_info_t * pi, uint state);
+
+static void pcie_war_aspm_clkreq(pcicore_info_t * pi);
+static void pcie_war_serdes(pcicore_info_t * pi);
+static void pcie_war_noplldown(pcicore_info_t * pi);
+static void pcie_war_polarity(pcicore_info_t * pi);
+static void pcie_war_pci_setup(pcicore_info_t * pi);
+
+static bool pcicore_pmecap(pcicore_info_t * pi);
+
+#define PCIE_ASPM(sih) ((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
+
+#define DWORD_ALIGN(x) (x & ~(0x03))
+#define BYTE_POS(x) (x & 0x3)
+#define WORD_POS(x) (x & 0x1)
+
+#define BYTE_SHIFT(x) (8 * BYTE_POS(x))
+#define WORD_SHIFT(x) (16 * WORD_POS(x))
+
+#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
+#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
+
+#define read_pci_cfg_byte(a) \
+ (BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
+
+#define read_pci_cfg_word(a) \
+ (WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
+
+#define write_pci_cfg_byte(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFF << BYTE_SHIFT(a))) | \
+	        ((val) << BYTE_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
+
+#define write_pci_cfg_word(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFFFF << WORD_SHIFT(a))) | \
+	        ((val) << WORD_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
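For example, the header-type probe used below in pcicore_find_pci_capability() reads the byte at PCI_CFG_HDR (header type, offset 0x0e in standard PCI config space) as one dword-aligned access plus a byte extract. The macros assume a local `osh` in scope:

/* read_pci_cfg_byte(0x0e):
 *   DWORD_ALIGN(0x0e) = 0x0c -> single 32-bit config read at 0x0c
 *   BYTE_POS(0x0e)    = 2    -> take bits 23:16 and mask with 0xff
 */
uint8 hdr_type = read_pci_cfg_byte(PCI_CFG_HDR);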
+
+/* delay needed between mdio control / mdiodata register accesses */
+#define PR28829_DELAY() OSL_DELAY(10)
+
+/* Initialize the PCI core. It is the caller's responsibility to make sure
+ * this is done only once
+ */
+void *pcicore_init(si_t * sih, osl_t * osh, void *regs)
+{
+ pcicore_info_t *pi;
+
+ ASSERT(sih->bustype == PCI_BUS);
+
+ /* alloc pcicore_info_t */
+ if ((pi = MALLOC(osh, sizeof(pcicore_info_t))) == NULL) {
+ PCI_ERROR(("pci_attach: malloc failed! malloced %d bytes\n",
+ MALLOCED(osh)));
+ return (NULL);
+ }
+
+ bzero(pi, sizeof(pcicore_info_t));
+
+ pi->sih = sih;
+ pi->osh = osh;
+
+ if (sih->buscoretype == PCIE_CORE_ID) {
+ uint8 cap_ptr;
+ pi->regs.pcieregs = (sbpcieregs_t *) regs;
+ cap_ptr =
+ pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID,
+ NULL, NULL);
+ ASSERT(cap_ptr);
+ pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
+ } else
+ pi->regs.pciregs = (sbpciregs_t *) regs;
+
+ return pi;
+}
+
+void pcicore_deinit(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (pi == NULL)
+ return;
+ MFREE(pi->osh, pi, sizeof(pcicore_info_t));
+}
+
+/* return cap_offset if requested capability exists in the PCI config space */
+/* Note that it is the caller's responsibility to make sure this is a PCI bus */
+uint8
+pcicore_find_pci_capability(osl_t * osh, uint8 req_cap_id, uchar * buf,
+ uint32 * buflen)
+{
+ uint8 cap_id;
+ uint8 cap_ptr = 0;
+ uint32 bufsize;
+ uint8 byte_val;
+
+ /* check for Header type 0 */
+ byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+ if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
+ goto end;
+
+ /* check if the capability pointer field exists */
+ byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+ if (!(byte_val & PCI_CAPPTR_PRESENT))
+ goto end;
+
+ cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+ /* check if the capability pointer is 0x00 */
+ if (cap_ptr == 0x00)
+ goto end;
+
+	/* loop through the capability list and see if the pcie capability exists */
+
+ cap_id = read_pci_cfg_byte(cap_ptr);
+
+ while (cap_id != req_cap_id) {
+ cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+ if (cap_ptr == 0x00)
+ break;
+ cap_id = read_pci_cfg_byte(cap_ptr);
+ }
+ if (cap_id != req_cap_id) {
+ goto end;
+ }
+ /* found the caller requested capability */
+ if ((buf != NULL) && (buflen != NULL)) {
+ uint8 cap_data;
+
+ bufsize = *buflen;
+ if (!bufsize)
+ goto end;
+ *buflen = 0;
+		/* copy the capability data excluding cap ID and next ptr */
+ cap_data = cap_ptr + 2;
+ if ((bufsize + cap_data) > SZPCR)
+ bufsize = SZPCR - cap_data;
+ *buflen = bufsize;
+ while (bufsize--) {
+ *buf = read_pci_cfg_byte(cap_data);
+ cap_data++;
+ buf++;
+ }
+ }
+ end:
+ return cap_ptr;
+}
+
+/* ***** Register Access API */
+uint
+pcie_readreg(osl_t * osh, sbpcieregs_t * pcieregs, uint addrtype, uint offset)
+{
+ uint retval = 0xFFFFFFFF;
+
+ ASSERT(pcieregs != NULL);
+
+ switch (addrtype) {
+ case PCIE_CONFIGREGS:
+ W_REG(osh, (&pcieregs->configaddr), offset);
+ (void)R_REG(osh, (&pcieregs->configaddr));
+ retval = R_REG(osh, &(pcieregs->configdata));
+ break;
+ case PCIE_PCIEREGS:
+ W_REG(osh, &(pcieregs->pcieindaddr), offset);
+ (void)R_REG(osh, (&pcieregs->pcieindaddr));
+ retval = R_REG(osh, &(pcieregs->pcieinddata));
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ return retval;
+}
+
+uint
+pcie_writereg(osl_t * osh, sbpcieregs_t * pcieregs, uint addrtype, uint offset,
+ uint val)
+{
+ ASSERT(pcieregs != NULL);
+
+ switch (addrtype) {
+ case PCIE_CONFIGREGS:
+ W_REG(osh, (&pcieregs->configaddr), offset);
+ W_REG(osh, (&pcieregs->configdata), val);
+ break;
+ case PCIE_PCIEREGS:
+ W_REG(osh, (&pcieregs->pcieindaddr), offset);
+ W_REG(osh, (&pcieregs->pcieinddata), val);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ return 0;
+}
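Both address spaces behind these helpers are indirect (write the address register, then touch the data register). A typical read-modify-write through them, the same one pcie_war_pci_setup() performs later in this file:

/* Set a bit in a protocol-layer register via the indirect window */
uint32 w;

w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
w |= 0x40;
pcie_writereg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);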
+
+static bool pcie_mdiosetblock(pcicore_info_t * pi, uint blk)
+{
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ uint mdiodata, i = 0;
+ uint pcie_serdes_spinwait = 200;
+
+ mdiodata =
+ MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR <<
+ MDIODATA_DEVADDR_SHF) |
+ (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk <<
+ 4);
+ W_REG(pi->osh, &pcieregs->mdiodata, mdiodata);
+
+ PR28829_DELAY();
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ if (R_REG(pi->osh, &(pcieregs->mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
+ break;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+
+ if (i >= pcie_serdes_spinwait) {
+ PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static int
+pcie_mdioop(pcicore_info_t * pi, uint physmedia, uint regaddr, bool write,
+ uint * val)
+{
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ uint mdiodata;
+ uint i = 0;
+ uint pcie_serdes_spinwait = 10;
+
+ /* enable mdio access to SERDES */
+ W_REG(pi->osh, (&pcieregs->mdiocontrol),
+ MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+
+ if (pi->sih->buscorerev >= 10) {
+ /* new serdes is slower in rw, using two layers of reg address mapping */
+ if (!pcie_mdiosetblock(pi, physmedia))
+ return 1;
+ mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
+ (regaddr << MDIODATA_REGADDR_SHF);
+ pcie_serdes_spinwait *= 20;
+ } else {
+ mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
+ (regaddr << MDIODATA_REGADDR_SHF_OLD);
+ }
+
+ if (!write)
+ mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
+ else
+ mdiodata |=
+ (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);
+
+ W_REG(pi->osh, &pcieregs->mdiodata, mdiodata);
+
+ PR28829_DELAY();
+
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ if (R_REG(pi->osh, &(pcieregs->mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
+ if (!write) {
+ PR28829_DELAY();
+ *val =
+ (R_REG(pi->osh, &(pcieregs->mdiodata)) &
+ MDIODATA_MASK);
+ }
+ /* Disable mdio access to SERDES */
+ W_REG(pi->osh, (&pcieregs->mdiocontrol), 0);
+ return 0;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+
+ PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
+ /* Disable mdio access to SERDES */
+ W_REG(pi->osh, (&pcieregs->mdiocontrol), 0);
+ return 1;
+}
+
+/* use the mdio interface to read from mdio slaves */
+static int
+pcie_mdioread(pcicore_info_t * pi, uint physmedia, uint regaddr, uint * regval)
+{
+ return pcie_mdioop(pi, physmedia, regaddr, FALSE, regval);
+}
+
+/* use the mdio interface to write to mdio slaves */
+static int
+pcie_mdiowrite(pcicore_info_t * pi, uint physmedia, uint regaddr, uint val)
+{
+ return pcie_mdioop(pi, physmedia, regaddr, TRUE, &val);
+}
+
+/* ***** Support functions ***** */
+uint8 pcie_clkreq(void *pch, uint32 mask, uint32 val)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint32 reg_val;
+ uint8 offset;
+
+ offset = pi->pciecap_lcreg_offset;
+ if (!offset)
+ return 0;
+
+ reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
+ /* set operation */
+ if (mask) {
+ if (val)
+ reg_val |= PCIE_CLKREQ_ENAB;
+ else
+ reg_val &= ~PCIE_CLKREQ_ENAB;
+ OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), reg_val);
+ reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
+ }
+ if (reg_val & PCIE_CLKREQ_ENAB)
+ return 1;
+ else
+ return 0;
+}
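pcie_clkreq() is both a query and a setter: a zero mask only reports the current CLKREQ enable, while a non-zero mask applies val first. A sketch of both uses, mirroring pcie_clkreq_upd() below (pch is the handle returned by pcicore_init()):

uint8 clkreq_on;

clkreq_on = pcie_clkreq(pch, 0, 0);  /* query only */
pcie_clkreq(pch, 1, 0);              /* force CLKREQ off, as in the SI_DOATTACH case */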
+
+static void pcie_extendL1timer(pcicore_info_t * pi, bool extend)
+{
+ uint32 w;
+ si_t *sih = pi->sih;
+ osl_t *osh = pi->osh;
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+
+ if (!PCIE_PUB(sih) || sih->buscorerev < 7)
+ return;
+
+ w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ if (extend)
+ w |= PCIE_ASPMTIMER_EXTEND;
+ else
+ w &= ~PCIE_ASPMTIMER_EXTEND;
+ pcie_writereg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+ w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+}
+
+/* centralized clkreq control policy */
+static void pcie_clkreq_upd(pcicore_info_t * pi, uint state)
+{
+ si_t *sih = pi->sih;
+ ASSERT(PCIE_PUB(sih));
+
+ switch (state) {
+ case SI_DOATTACH:
+ if (PCIE_ASPM(sih))
+ pcie_clkreq((void *)pi, 1, 0);
+ break;
+ case SI_PCIDOWN:
+ if (sih->buscorerev == 6) { /* turn on serdes PLL down */
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol_addr), ~0,
+ 0);
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol_data),
+ ~0x40, 0);
+ } else if (pi->pcie_pr42767) {
+ pcie_clkreq((void *)pi, 1, 1);
+ }
+ break;
+ case SI_PCIUP:
+ if (sih->buscorerev == 6) { /* turn off serdes PLL down */
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol_addr), ~0,
+ 0);
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol_data),
+ ~0x40, 0x40);
+ } else if (PCIE_ASPM(sih)) { /* disable clkreq */
+ pcie_clkreq((void *)pi, 1, 0);
+ }
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+/* ***** PCI core WARs ***** */
+/* Done only once at attach time */
+static void pcie_war_polarity(pcicore_info_t * pi)
+{
+ uint32 w;
+
+ if (pi->pcie_polarity != 0)
+ return;
+
+ w = pcie_readreg(pi->osh, pi->regs.pcieregs, PCIE_PCIEREGS,
+ PCIE_PLP_STATUSREG);
+
+ /* Detect the current polarity at attach and force that polarity and
+ * disable changing the polarity
+ */
+ if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
+ pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
+ else
+ pi->pcie_polarity =
+ (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
+}
+
+/* enable ASPM and CLKREQ if srom doesn't have it */
+/* Needs to happen when update to shadow SROM is needed
+ * : Coming out of 'standby'/'hibernate'
+ * : If pcie_war_aspm_ovr state changed
+ */
+static void pcie_war_aspm_clkreq(pcicore_info_t * pi)
+{
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ si_t *sih = pi->sih;
+ uint16 val16, *reg16;
+ uint32 w;
+
+ if (!PCIE_ASPM(sih))
+ return;
+
+ /* bypass this on QT or VSIM */
+ if (!ISSIM_ENAB(sih)) {
+
+ reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
+ val16 = R_REG(pi->osh, reg16);
+
+ val16 &= ~SRSH_ASPM_ENB;
+ if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
+ val16 |= SRSH_ASPM_ENB;
+ else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
+ val16 |= SRSH_ASPM_L1_ENB;
+ else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
+ val16 |= SRSH_ASPM_L0s_ENB;
+
+ W_REG(pi->osh, reg16, val16);
+
+ w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
+ sizeof(uint32));
+ w &= ~PCIE_ASPM_ENAB;
+ w |= pi->pcie_war_aspm_ovr;
+ OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
+ sizeof(uint32), w);
+ }
+
+ reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
+ val16 = R_REG(pi->osh, reg16);
+
+ if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
+ val16 |= SRSH_CLKREQ_ENB;
+ pi->pcie_pr42767 = TRUE;
+ } else
+ val16 &= ~SRSH_CLKREQ_ENB;
+
+ W_REG(pi->osh, reg16, val16);
+}
+
+/* Apply the polarity determined at the start */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void pcie_war_serdes(pcicore_info_t * pi)
+{
+ uint32 w = 0;
+
+ if (pi->pcie_polarity != 0)
+ pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
+ pi->pcie_polarity);
+
+ pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
+ if (w & PLL_CTRL_FREQDET_EN) {
+ w &= ~PLL_CTRL_FREQDET_EN;
+ pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
+ }
+}
+
+/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void BCMINITFN(pcie_misc_config_fixup) (pcicore_info_t * pi) {
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ uint16 val16, *reg16;
+
+ reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
+ val16 = R_REG(pi->osh, reg16);
+
+ if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
+ val16 |= SRSH_L23READY_EXIT_NOPERST;
+ W_REG(pi->osh, reg16, val16);
+ }
+}
+
+/* quick hack for testing */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void pcie_war_noplldown(pcicore_info_t * pi)
+{
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ uint16 *reg16;
+
+ ASSERT(pi->sih->buscorerev == 7);
+
+ /* turn off serdes PLL down */
+ si_corereg(pi->sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipcontrol),
+ CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+
+ /* clear srom shadow backdoor */
+ reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
+ W_REG(pi->osh, reg16, 0);
+}
+
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void pcie_war_pci_setup(pcicore_info_t * pi)
+{
+ si_t *sih = pi->sih;
+ osl_t *osh = pi->osh;
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ uint32 w;
+
+ if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) {
+ w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS,
+ PCIE_TLP_WORKAROUNDSREG);
+ w |= 0x8;
+ pcie_writereg(osh, pcieregs, PCIE_PCIEREGS,
+ PCIE_TLP_WORKAROUNDSREG, w);
+ }
+
+ if (sih->buscorerev == 1) {
+ w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+ w |= (0x40);
+ pcie_writereg(osh, pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+ }
+
+ if (sih->buscorerev == 0) {
+ pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
+ pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
+ pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
+ } else if (PCIE_ASPM(sih)) {
+ /* Change the L1 threshold for better performance */
+ w = pcie_readreg(osh, pcieregs, PCIE_PCIEREGS,
+ PCIE_DLLP_PMTHRESHREG);
+ w &= ~(PCIE_L1THRESHOLDTIME_MASK);
+ w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
+ pcie_writereg(osh, pcieregs, PCIE_PCIEREGS,
+ PCIE_DLLP_PMTHRESHREG, w);
+
+ pcie_war_serdes(pi);
+
+ pcie_war_aspm_clkreq(pi);
+ } else if (pi->sih->buscorerev == 7)
+ pcie_war_noplldown(pi);
+
+ /* Note that the fix is actually in the SROM, that's why this is open-ended */
+ if (pi->sih->buscorerev >= 6)
+ pcie_misc_config_fixup(pi);
+}
+
+void pcie_war_ovr_aspm_update(void *pch, uint8 aspm)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (!PCIE_ASPM(pi->sih))
+ return;
+
+ /* Validate */
+ if (aspm > PCIE_ASPM_ENAB)
+ return;
+
+ pi->pcie_war_aspm_ovr = aspm;
+
+ /* Update the current state */
+ pcie_war_aspm_clkreq(pi);
+}
+
+/* ***** Functions called during driver state changes ***** */
+void BCMATTACHFN(pcicore_attach) (void *pch, char *pvars, int state) {
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ si_t *sih = pi->sih;
+
+ /* Determine if this board needs override */
+ if (PCIE_ASPM(sih)) {
+ if ((uint32) getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR) {
+ pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
+ } else {
+ pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
+ }
+ }
+
+ /* These need to happen in this order only */
+ pcie_war_polarity(pi);
+
+ pcie_war_serdes(pi);
+
+ pcie_war_aspm_clkreq(pi);
+
+ pcie_clkreq_upd(pi, state);
+
+}
+
+void pcicore_hwup(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (!pi || !PCIE_PUB(pi->sih))
+ return;
+
+ pcie_war_pci_setup(pi);
+}
+
+void pcicore_up(void *pch, int state)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (!pi || !PCIE_PUB(pi->sih))
+ return;
+
+ /* Restore L1 timer for better performance */
+ pcie_extendL1timer(pi, TRUE);
+
+ pcie_clkreq_upd(pi, state);
+}
+
+/* When the device is going to enter D3 state (or the system is going to enter S3/S4 states) */
+void pcicore_sleep(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint32 w;
+
+ if (!pi || !PCIE_ASPM(pi->sih))
+ return;
+
+ w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
+ sizeof(uint32));
+ w &= ~PCIE_CAP_LCREG_ASPML1;
+ OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(uint32),
+ w);
+
+ pi->pcie_pr42767 = FALSE;
+}
+
+void pcicore_down(void *pch, int state)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (!pi || !PCIE_PUB(pi->sih))
+ return;
+
+ pcie_clkreq_upd(pi, state);
+
+ /* Reduce L1 timer for better power savings */
+ pcie_extendL1timer(pi, FALSE);
+}
+
+/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
+/* Just uses PCI config accesses to find out, when needed before sb_attach is done */
+bool pcicore_pmecap_fast(osl_t * osh)
+{
+ uint8 cap_ptr;
+ uint32 pmecap;
+
+ cap_ptr =
+ pcicore_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID, NULL,
+ NULL);
+
+ if (!cap_ptr)
+ return FALSE;
+
+ pmecap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
+
+ return ((pmecap & PME_CAP_PM_STATES) != 0);
+}
+
+/* return TRUE if PM capability exists in the pci config space
+ * Uses and caches the information using core handle
+ */
+static bool pcicore_pmecap(pcicore_info_t * pi)
+{
+ uint8 cap_ptr;
+ uint32 pmecap;
+
+ if (!pi->pmecap_offset) {
+ cap_ptr =
+ pcicore_find_pci_capability(pi->osh,
+ PCI_CAP_POWERMGMTCAP_ID, NULL,
+ NULL);
+ if (!cap_ptr)
+ return FALSE;
+
+ pi->pmecap_offset = cap_ptr;
+
+ pmecap =
+ OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset,
+ sizeof(uint32));
+
+ /* At least one state can generate PME */
+ pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
+ }
+
+ return (pi->pmecap);
+}
+
+/* Enable PME generation */
+void pcicore_pmeen(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint32 w;
+
+ /* if not pmecapable return */
+ if (!pcicore_pmecap(pi))
+ return;
+
+ w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
+ sizeof(uint32));
+ w |= (PME_CSR_PME_EN);
+ OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
+ sizeof(uint32), w);
+}
+
+/*
+ * Return TRUE if PME status set
+ */
+bool pcicore_pmestat(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint32 w;
+
+ if (!pcicore_pmecap(pi))
+ return FALSE;
+
+ w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
+ sizeof(uint32));
+
+ return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
+}
+
+/* Disable PME generation, clear the PME status bit if set
+ */
+void pcicore_pmeclr(void *pch)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint32 w;
+
+ if (!pcicore_pmecap(pi))
+ return;
+
+ w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
+ sizeof(uint32));
+
+ PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
+
+	/* PME_STAT is write-1-to-clear, so writing back the value just read
+	 * clears any pending status; only the enable bit is masked off here
+	 */
+ w &= ~(PME_CSR_PME_EN);
+
+ OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
+ sizeof(uint32), w);
+}
+
+uint32 pcie_lcreg(void *pch, uint32 mask, uint32 val)
+{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ uint8 offset;
+
+ offset = pi->pciecap_lcreg_offset;
+ if (!offset)
+ return 0;
+
+ /* set operation */
+ if (mask)
+ OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(uint32), val);
+
+ return OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(uint32));
+}
+
+uint32
+pcicore_pciereg(void *pch, uint32 offset, uint32 mask, uint32 val, uint type)
+{
+ uint32 reg_val = 0;
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+ sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ osl_t *osh = pi->osh;
+
+ if (mask) {
+ PCI_ERROR(("PCIEREG: 0x%x writeval 0x%x\n", offset, val));
+ pcie_writereg(osh, pcieregs, type, offset, val);
+ }
+
+ /* Should not read register 0x154 */
+ if (pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11
+ && type == PCIE_PCIEREGS)
+ return reg_val;
+
+ reg_val = pcie_readreg(osh, pcieregs, type, offset);
+ PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));
+
+ return reg_val;
+}
+
+uint32
+pcicore_pcieserdesreg(void *pch, uint32 mdioslave, uint32 offset, uint32 mask,
+ uint32 val)
+{
+ uint32 reg_val = 0;
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
+
+ if (mask) {
+ PCI_ERROR(("PCIEMDIOREG: 0x%x writeval 0x%x\n", offset, val));
+ pcie_mdiowrite(pi, mdioslave, offset, val);
+ }
+
+ if (pcie_mdioread(pi, mdioslave, offset, &reg_val))
+ reg_val = 0xFFFFFFFF;
+ PCI_ERROR(("PCIEMDIOREG: dev 0x%x offset 0x%x read 0x%x\n", mdioslave,
+ offset, reg_val));
+
+ return reg_val;
+}
diff --git a/drivers/staging/brcm80211/util/nvram/nvram_ro.c b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
new file mode 100644
index 000000000000..68c69eedbd7f
--- /dev/null
+++ b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmendian.h>
+#include <bcmnvram.h>
+#include <sbchipc.h>
+#include <bcmsrom.h>
+#include <bcmotp.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+
+#define NVR_MSG(x)
+
+typedef struct _vars {
+ struct _vars *next;
+ int bufsz; /* allocated size */
+ int size; /* actual vars size */
+ char *vars;
+} vars_t;
+
+#define VARS_T_OH sizeof(vars_t)
+
+static vars_t *vars = NULL;
+
+#define NVRAM_FILE 1
+
+static char *findvar(char *vars, char *lim, const char *name);
+
+#if defined(FLASH)
+/* copy flash to ram */
+static void BCMINITFN(get_flash_nvram) (si_t * sih, struct nvram_header * nvh) {
+ osl_t *osh;
+ uint nvs, bufsz;
+ vars_t *new;
+
+ osh = si_osh(sih);
+
+ nvs = R_REG(osh, &nvh->len) - sizeof(struct nvram_header);
+ bufsz = nvs + VARS_T_OH;
+
+ if ((new = (vars_t *) MALLOC(osh, bufsz)) == NULL) {
+ NVR_MSG(("Out of memory for flash vars\n"));
+ return;
+ }
+ new->vars = (char *)new + VARS_T_OH;
+
+ new->bufsz = bufsz;
+ new->size = nvs;
+ new->next = vars;
+ vars = new;
+
+ bcopy((char *)(&nvh[1]), new->vars, nvs);
+
+ NVR_MSG(("%s: flash nvram @ %p, copied %d bytes to %p\n", __func__,
+ nvh, nvs, new->vars));
+}
+#endif /* FLASH */
+
+int BCMATTACHFN(nvram_init) (void *si) {
+
+	/* Make sure nvram in flash is read only once; a second init without an intervening nvram_exit() is ignored */
+ if (vars != NULL) {
+ NVR_MSG(("nvram_init: called again without calling nvram_exit()\n"));
+ return 0;
+ }
+ return 0;
+}
+
+int BCMATTACHFN(nvram_append) (void *si, char *varlst, uint varsz) {
+ uint bufsz = VARS_T_OH;
+ vars_t *new;
+
+ if ((new = MALLOC(si_osh((si_t *) si), bufsz)) == NULL)
+ return BCME_NOMEM;
+
+ new->vars = varlst;
+ new->bufsz = bufsz;
+ new->size = varsz;
+ new->next = vars;
+ vars = new;
+
+ return BCME_OK;
+}
+
+void BCMUNINITFN(nvram_exit) (void *si) {
+ vars_t *this, *next;
+ si_t *sih;
+
+ sih = (si_t *) si;
+ this = vars;
+
+ if (this)
+ MFREE(si_osh(sih), this->vars, this->size);
+
+ while (this) {
+ next = this->next;
+ MFREE(si_osh(sih), this, this->bufsz);
+ this = next;
+ }
+ vars = NULL;
+}
+
+static char *findvar(char *vars, char *lim, const char *name)
+{
+ char *s;
+ int len;
+
+ len = strlen(name);
+
+ for (s = vars; (s < lim) && *s;) {
+ if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+ return (&s[len + 1]);
+
+ while (*s++) ;
+ }
+
+ return NULL;
+}
+
+char *nvram_get(const char *name)
+{
+ char *v = NULL;
+ vars_t *cur;
+
+ for (cur = vars; cur; cur = cur->next)
+ if ((v = findvar(cur->vars, cur->vars + cur->size, name)))
+ break;
+
+ return v;
+}
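The vars blocks hold packed NUL-terminated "name=value" strings, so a hit returns a pointer to the value text. A minimal lookup sketch (the variable name is only an example):

char *bt = nvram_get("boardtype");   /* NULL if the variable is absent */

if (bt)
	printk("boardtype=%s\n", bt);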
+
+int BCMATTACHFN(nvram_set) (const char *name, const char *value) {
+ return 0;
+}
+
+int BCMATTACHFN(nvram_unset) (const char *name) {
+ return 0;
+}
+
+int BCMATTACHFN(nvram_reset) (void *si) {
+ return 0;
+}
+
+int BCMATTACHFN(nvram_commit) (void) {
+ return 0;
+}
+
+int nvram_getall(char *buf, int count)
+{
+ int len, resid = count;
+ vars_t *this;
+
+ this = vars;
+ while (this) {
+ char *from, *lim, *to;
+ int acc;
+
+ from = this->vars;
+ lim = (char *)((uintptr) this->vars + this->size);
+ to = buf;
+ acc = 0;
+ while ((from < lim) && (*from)) {
+ len = strlen(from) + 1;
+ if (resid < (acc + len))
+ return BCME_BUFTOOSHORT;
+ bcopy(from, to, len);
+ acc += len;
+ from += len;
+ to += len;
+ }
+
+ resid -= acc;
+ buf += acc;
+ this = this->next;
+ }
+ if (resid < 1)
+ return BCME_BUFTOOSHORT;
+ *buf = '\0';
+ return 0;
+}
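nvram_getall() emits consecutive NUL-terminated "name=value" entries and terminates the list with an empty string; a caller walks the buffer like this (the buffer size is arbitrary for the sketch):

char buf[1024], *p;

if (nvram_getall(buf, sizeof(buf)) == 0) {
	for (p = buf; *p; p += strlen(p) + 1)
		printk("%s\n", p);   /* each entry is "name=value" */
}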
diff --git a/drivers/staging/brcm80211/util/qmath.c b/drivers/staging/brcm80211/util/qmath.c
new file mode 100644
index 000000000000..99a17765622b
--- /dev/null
+++ b/drivers/staging/brcm80211/util/qmath.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "qmath.h"
+
+/*
+Description: This function saturates a 32 bit input to a 16 bit result.
+If the input is greater than 0x7fff the output is saturated to 0x7fff;
+if the input is less than 0xffff8000 the output is saturated to 0x8000
+(0xffff8000 as int32); otherwise the output equals the input.
+*/
+int16 qm_sat32(int32 op)
+{
+ int16 result;
+ if (op > (int32) 0x7fff) {
+ result = 0x7fff;
+ } else if (op < (int32) 0xffff8000) {
+ result = (int16) (0x8000);
+ } else {
+ result = (int16) op;
+ }
+ return result;
+}
+
+/*
+Description: This function multiplies two 16 bit inputs and returns the 32 bit result.
+The multiplication behaves like ordinary compiler multiplication. It exists as a
+separate operation because 16 bit multiplication is cheaper than 32 bit multiplication
+on some platforms (most qmath functions can be replaced with processor intrinsics).
+*/
+int32 qm_mul321616(int16 op1, int16 op2)
+{
+ return ((int32) (op1) * (int32) (op2));
+}
+
+/*
+Description: This function performs a 16 bit multiplication and returns the result in
+16 bits. To fit the result into 16 bits, the 32 bit product is right shifted by 16 bits.
+*/
+int16 qm_mul16(int16 op1, int16 op2)
+{
+ int32 result;
+ result = ((int32) (op1) * (int32) (op2));
+ return ((int16) (result >> 16));
+}
+
+/*
+Description: This function multiplies two 16 bit numbers and returns the result in 32 bits.
+It removes the extra sign bit created by the multiplication by left-shifting the 32 bit
+product by 1 bit before returning it, so the output is twice that of compiler
+multiplication (i.e. qm_muls321616(2,3) = 12).
+When both 16 bit inputs are 0x8000, the result is saturated to 0x7fffffff.
+*/
+int32 qm_muls321616(int16 op1, int16 op2)
+{
+ int32 result;
+ if (op1 == (int16) (0x8000) && op2 == (int16) (0x8000)) {
+ result = 0x7fffffff;
+ } else {
+ result = ((int32) (op1) * (int32) (op2));
+ result = result << 1;
+ }
+ return result;
+}
+
+/*
+Description: This function performs a 16 bit unsigned multiplication. To fit the output
+into 16 bits, the 32 bit product is right shifted by 16 bits.
+*/
+uint16 qm_mulu16(uint16 op1, uint16 op2)
+{
+ return (uint16) (((uint32) op1 * (uint32) op2) >> 16);
+}
+
+/*
+Description: This function performs a 16 bit multiplication and returns the result in
+16 bits. To fit the product into 16 bits it is right shifted by 15 bits rather than 16,
+which removes the extra sign bit formed by the multiplication.
+When both 16 bit inputs are 0x8000, the intermediate result is saturated to 0x7fffffff.
+*/
+int16 qm_muls16(int16 op1, int16 op2)
+{
+ int32 result;
+ if (op1 == (int16) 0x8000 && op2 == (int16) 0x8000) {
+ result = 0x7fffffff;
+ } else {
+ result = ((int32) (op1) * (int32) (op2));
+ }
+ return ((int16) (result >> 15));
+}
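A worked Q15 example for the routine above (values chosen for illustration): 0.5 is represented as 0x4000 in Q15, and the 15-bit shift keeps the product in Q15:

/* 0.5 * 0.5: (0x4000 * 0x4000) = 0x10000000, >> 15 = 0x2000 = 0.25 in Q15 */
int16 quarter = qm_muls16(0x4000, 0x4000);   /* 0x2000 */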
+
+/*
+Description: This function adds two 32 bit numbers and returns the 32 bit result.
+If the result overflows 32 bits, the output is saturated.
+*/
+int32 qm_add32(int32 op1, int32 op2)
+{
+ int32 result;
+ result = op1 + op2;
+ if (op1 < 0 && op2 < 0 && result > 0) {
+ result = 0x80000000;
+ } else if (op1 > 0 && op2 > 0 && result < 0) {
+ result = 0x7fffffff;
+ }
+ return result;
+}
+
+/*
+Description: This function adds two 16 bit numbers and returns the 16 bit result.
+If the result overflows 16 bits, the output is saturated.
+*/
+int16 qm_add16(int16 op1, int16 op2)
+{
+ int16 result;
+ int32 temp = (int32) op1 + (int32) op2;
+ if (temp > (int32) 0x7fff) {
+ result = (int16) 0x7fff;
+ } else if (temp < (int32) 0xffff8000) {
+ result = (int16) 0xffff8000;
+ } else {
+ result = (int16) temp;
+ }
+ return result;
+}
+
+/*
+Description: This function performs a 16 bit subtraction and returns the 16 bit result.
+If the result overflows 16 bits, the output is saturated to the 16 bit range.
+*/
+int16 qm_sub16(int16 op1, int16 op2)
+{
+ int16 result;
+ int32 temp = (int32) op1 - (int32) op2;
+ if (temp > (int32) 0x7fff) {
+ result = (int16) 0x7fff;
+ } else if (temp < (int32) 0xffff8000) {
+ result = (int16) 0xffff8000;
+ } else {
+ result = (int16) temp;
+ }
+ return result;
+}
+
+/*
+Description: This function performs a 32 bit subtraction and returns the 32 bit result.
+If the result overflows 32 bits, the output is saturated to the 32 bit range.
+*/
+int32 qm_sub32(int32 op1, int32 op2)
+{
+ int32 result;
+ result = op1 - op2;
+ if (op1 >= 0 && op2 < 0 && result < 0) {
+ result = 0x7fffffff;
+ } else if (op1 < 0 && op2 > 0 && result > 0) {
+ result = 0x80000000;
+ }
+ return result;
+}
+
+/*
+Description: This function multiplies two 16 bit numbers, accumulates the result
+into the 32 bit accumulator and returns the 32 bit accumulated result.
+If the accumulation overflows, the output is saturated.
+*/
+int32 qm_mac321616(int32 acc, int16 op1, int16 op2)
+{
+ int32 result;
+ result = qm_add32(acc, qm_mul321616(op1, op2));
+ return result;
+}
+
+/*
+Description: This function performs a 32 bit saturating left shift when the specified shift
+is positive and a 32 bit right shift when the specified shift is negative.
+It returns the result of the shift operation.
+*/
+int32 qm_shl32(int32 op, int shift)
+{
+ int i;
+ int32 result;
+ result = op;
+ if (shift > 31)
+ shift = 31;
+ else if (shift < -31)
+ shift = -31;
+ if (shift >= 0) {
+ for (i = 0; i < shift; i++) {
+ result = qm_add32(result, result);
+ }
+ } else {
+ result = result >> (-shift);
+ }
+ return result;
+}
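/* Illustrative sketch (editor's example, not part of the patch): the left
 * shift is implemented as repeated saturating additions, so it clamps at
 * 0x7fffffff instead of wrapping. Assumes qmath.h declarations.
 */
#include "qmath.h"

void example_qm_shl32(void)
{
	int32 sat = qm_shl32(0x40000000, 2);	/* saturates to 0x7fffffff */
	int32 shr = qm_shl32(0x7fffffff, -4);	/* plain right shift: 0x07ffffff */
	(void)sat;
	(void)shr;
}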
+
+/*
+Description: This function performs a 32 bit right shift when shift is positive
+and a 32 bit saturating left shift when shift is negative. It returns the
+result of the shift operation.
+*/
+int32 qm_shr32(int32 op, int shift)
+{
+ return qm_shl32(op, -shift);
+}
+
+/*
+Description: This function performs a 16 bit saturating left shift when the specified shift
+is positive and a 16 bit right shift when the specified shift is negative.
+It returns the result of the shift operation.
+*/
+int16 qm_shl16(int16 op, int shift)
+{
+ int i;
+ int16 result;
+ result = op;
+ if (shift > 15)
+ shift = 15;
+ else if (shift < -15)
+ shift = -15;
+ if (shift > 0) {
+ for (i = 0; i < shift; i++) {
+ result = qm_add16(result, result);
+ }
+ } else {
+ result = result >> (-shift);
+ }
+ return result;
+}
+
+/*
+Description: This function performs a 16 bit right shift when shift is positive
+and a 16 bit saturating left shift when shift is negative. It returns the
+result of the shift operation.
+*/
+int16 qm_shr16(int16 op, int shift)
+{
+ return qm_shl16(op, -shift);
+}
+
+/*
+Description: This function returns the number of redundant sign bits in a 16 bit number.
+Example: qm_norm16(0x0080) = 7.
+*/
+int16 qm_norm16(int16 op)
+{
+ uint16 u16extraSignBits;
+ if (op == 0) {
+ return 15;
+ } else {
+ u16extraSignBits = 0;
+ while ((op >> 15) == (op >> 14)) {
+ u16extraSignBits++;
+ op = op << 1;
+ }
+ }
+ return u16extraSignBits;
+}
+
+/*
+Description: This function returns the number of redundant sign bits in a 32 bit number.
+Example: qm_norm32(0x00000080) = 23.
+*/
+int16 qm_norm32(int32 op)
+{
+ uint16 u16extraSignBits;
+ if (op == 0) {
+ return 31;
+ } else {
+ u16extraSignBits = 0;
+ while ((op >> 31) == (op >> 30)) {
+ u16extraSignBits++;
+ op = op << 1;
+ }
+ }
+ return u16extraSignBits;
+}
+
+/*
+Description: This function divides two non-negative 16 bit numbers.
+The numerator must be less than the denominator, so the quotient is always less than 1.
+The quotient is returned in q.15 format.
+*/
+int16 qm_div_s(int16 num, int16 denom)
+{
+ int16 var_out;
+ int16 iteration;
+ int32 L_num;
+ int32 L_denom;
+ L_num = (num) << 15;
+ L_denom = (denom) << 15;
+ for (iteration = 0; iteration < 15; iteration++) {
+ L_num <<= 1;
+ if (L_num >= L_denom) {
+ L_num = qm_sub32(L_num, L_denom);
+ L_num = qm_add32(L_num, 1);
+ }
+ }
+ var_out = (int16) (L_num & 0x7fff);
+ return (var_out);
+}
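/* Illustrative sketch (editor's example, not part of the patch): qm_div_s()
 * is a bit-by-bit long division that yields a q.15 quotient, e.g.
 * qm_div_s(1, 3) returns 10922 (0x2aaa), i.e. 10922/32768 ~= 0.3333.
 * Assumes qmath.h declarations.
 */
#include "qmath.h"

void example_qm_div_s(void)
{
	int16 third = qm_div_s(1, 3);	/* numerator must be < denominator */
	(void)third;			/* 10922 ~= 1/3 in q.15 */
}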
+
+/*
+Description: This function computes the absolute value of a 16 bit number.
+*/
+int16 qm_abs16(int16 op)
+{
+ if (op < 0) {
+ if (op == (int16) 0xffff8000) {
+ return 0x7fff;
+ } else {
+ return -op;
+ }
+ } else {
+ return op;
+ }
+}
+
+/*
+Description: This function divides two 16 bit numbers.
+The quotient is returned as the return value.
+The q format of the quotient is returned through the pointer (qQuotient) passed
+to this function; it is adjusted so that the quotient occupies all 16 bits.
+*/
+int16 qm_div16(int16 num, int16 denom, int16 * qQuotient)
+{
+ int16 sign;
+ int16 nNum, nDenom;
+ sign = num ^ denom;
+ num = qm_abs16(num);
+ denom = qm_abs16(denom);
+ nNum = qm_norm16(num);
+ nDenom = qm_norm16(denom);
+ num = qm_shl16(num, nNum - 1);
+ denom = qm_shl16(denom, nDenom);
+ *qQuotient = nNum - 1 - nDenom + 15;
+ if (sign >= 0) {
+ return qm_div_s(num, denom);
+ } else {
+ return -qm_div_s(num, denom);
+ }
+}
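/* Illustrative sketch (editor's example, not part of the patch): qm_div16()
 * returns the quotient together with its q format, so the real value is
 * quotient * 2^-qQuotient. For 3/12 it returns 8192 with qQuotient = 15,
 * i.e. 8192/2^15 = 0.25. Assumes qmath.h declarations.
 */
#include "qmath.h"

void example_qm_div16(void)
{
	int16 qfmt;
	int16 quot = qm_div16(3, 12, &qfmt);
	double value = (double)quot / (double)(1 << qfmt);	/* ~= 0.25 */
	(void)value;
}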
+
+/*
+Description: This function computes the absolute value of a 32 bit number.
+*/
+int32 qm_abs32(int32 op)
+{
+ if (op < 0) {
+ if (op == (int32) 0x80000000) {
+ return 0x7fffffff;
+ } else {
+ return -op;
+ }
+ } else {
+ return op;
+ }
+}
+
+/*
+Description: This function divides two 32 bit numbers. The division is performed
+using only the significant 16 bits of each 32 bit number.
+The quotient is returned as the return value.
+The q format of the quotient is returned through the pointer (qquotient) passed
+to this function; it is adjusted so that the quotient occupies all 16 bits.
+*/
+int16 qm_div163232(int32 num, int32 denom, int16 * qquotient)
+{
+ int32 sign;
+ int16 nNum, nDenom;
+ sign = num ^ denom;
+ num = qm_abs32(num);
+ denom = qm_abs32(denom);
+ nNum = qm_norm32(num);
+ nDenom = qm_norm32(denom);
+ num = qm_shl32(num, nNum - 1);
+ denom = qm_shl32(denom, nDenom);
+ *qquotient = nNum - 1 - nDenom + 15;
+ if (sign >= 0) {
+ return qm_div_s((int16) (num >> 16), (int16) (denom >> 16));
+ } else {
+ return -qm_div_s((int16) (num >> 16), (int16) (denom >> 16));
+ }
+}
+
+/*
+Description: This function multiplies a 32 bit number by a 16 bit number.
+The multiplication result is right shifted by 16 bits to fit the result
+into the 32 bit output.
+*/
+int32 qm_mul323216(int32 op1, int16 op2)
+{
+ int16 hi;
+ uint16 lo;
+ int32 result;
+ hi = op1 >> 16;
+ lo = (int16) (op1 & 0xffff);
+ result = qm_mul321616(hi, op2);
+ result = result + (qm_mulsu321616(op2, lo) >> 16);
+ return result;
+}
+
+/*
+Description: This function multiplies a signed 16 bit number by an unsigned 16 bit number
+and returns the result in 32 bits.
+*/
+int32 qm_mulsu321616(int16 op1, uint16 op2)
+{
+ return (int32) (op1) * op2;
+}
+
+/*
+Description: This function multiplies a 32 bit number by a 16 bit number. The multiplication
+result is right shifted by 15 bits to fit into 32 bits. Shifting by only 15 bits instead of
+16 removes the extra sign bit formed by the multiplication from the return value.
+When the inputs are 0x80000000 and 0x8000 the return value is saturated to 0x7fffffff.
+*/
+int32 qm_muls323216(int32 op1, int16 op2)
+{
+ int16 hi;
+ uint16 lo;
+ int32 result;
+ hi = op1 >> 16;
+ lo = (int16) (op1 & 0xffff);
+ result = qm_muls321616(hi, op2);
+ result = qm_add32(result, (qm_mulsu321616(op2, lo) >> 15));
+ return result;
+}
+
+/*
+Description: This function multiplies two 32 bit numbers. The multiplication result is right
+shifted by 32 bits to fit into 32 bits, and the right shifted
+result is returned as output.
+*/
+int32 qm_mul32(int32 a, int32 b)
+{
+ int16 hi1, hi2;
+ uint16 lo1, lo2;
+ int32 result;
+ hi1 = a >> 16;
+ hi2 = b >> 16;
+ lo1 = (uint16) (a & 0xffff);
+ lo2 = (uint16) (b & 0xffff);
+ result = qm_mul321616(hi1, hi2);
+ result = result + (qm_mulsu321616(hi1, lo2) >> 16);
+ result = result + (qm_mulsu321616(hi2, lo1) >> 16);
+ return result;
+}
+
+/*
+Description: This function multiplies two 32 bit numbers. The multiplication result is
+right shifted by 31 bits to fit into 32 bits, and the right shifted result is
+returned as output. Shifting by only 31 bits instead of 32 removes the extra
+sign bit formed by the multiplication.
+When the inputs are 0x80000000 and 0x80000000 the return value is saturated to
+0x7fffffff.
+*/
+int32 qm_muls32(int32 a, int32 b)
+{
+ int16 hi1, hi2;
+ uint16 lo1, lo2;
+ int32 result;
+ hi1 = a >> 16;
+ hi2 = b >> 16;
+ lo1 = (uint16) (a & 0xffff);
+ lo2 = (uint16) (b & 0xffff);
+ result = qm_muls321616(hi1, hi2);
+ result = qm_add32(result, (qm_mulsu321616(hi1, lo2) >> 15));
+ result = qm_add32(result, (qm_mulsu321616(hi2, lo1) >> 15));
+ result = qm_add32(result, (qm_mulu16(lo1, lo2) >> 15));
+ return result;
+}
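/* Illustrative sketch (editor's example, not part of the patch): qm_muls32()
 * acts as a q.31 fractional multiply, e.g. 0.5 * 0.5:
 * qm_muls32(0x40000000, 0x40000000) returns 0x20000000 (0.25 in q.31).
 * Assumes qmath.h declarations.
 */
#include "qmath.h"

void example_qm_muls32(void)
{
	int32 quarter = qm_muls32(0x40000000, 0x40000000);	/* 0x20000000 */
	(void)quarter;
}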
+
+/* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */
+static const int16 log_table[] = {
+ 0,
+ 1455,
+ 2866,
+ 4236,
+ 5568,
+ 6863,
+ 8124,
+ 9352,
+ 10549,
+ 11716,
+ 12855,
+ 13968,
+ 15055,
+ 16117,
+ 17156,
+ 18173,
+ 19168,
+ 20143,
+ 21098,
+ 22034,
+ 22952,
+ 23852,
+ 24736,
+ 25604,
+ 26455,
+ 27292,
+ 28114,
+ 28922,
+ 29717,
+ 30498,
+ 31267,
+ 32024
+};
+
+#define LOG_TABLE_SIZE 32 /* log_table size */
+#define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */
+#define Q_LOG_TABLE 15 /* qformat of log_table */
+#define LOG10_2 19728 /* log10(2) in q.16 */
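/* Illustrative sketch (editor's example, not part of the patch): each table
 * entry is round(log2(1 + i/32) * 2^Q_LOG_TABLE), e.g. for i = 1,
 * log2(33/32) * 32768 ~= 1455. A host-side check could regenerate the table
 * as below (assumes a C99 <math.h>).
 */
#include <math.h>

static int expected_log_table_entry(int i)
{
	return (int)(log2(1.0 + i / 32.0) * 32768.0 + 0.5);	/* 1455 for i = 1 */
}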
+
+/*
+Description:
+This routine takes an input number N and its q format qN and computes
+log10(N). It first normalizes N, so that N is in mag * (2^x) format, where
+mag is in the range 2^30 to (2^31 - 1). Then log2(mag * 2^x) = log2(mag) + x is computed,
+and from that log10(mag * 2^x) = log2(mag * 2^x) * log10(2).
+The routine looks up the log2 value in the table using the LOG2_LOG_TABLE_SIZE+1 MSBs.
+As the MSB is always 1 after normalization, only the next LOG2_LOG_TABLE_SIZE MSBs are used
+for the table lookup; the following 16 bits are used for interpolation.
+Inputs:
+N - number whose log10 has to be found.
+qN - q format of N.
+log10N - address where log10(N) will be written.
+qLog10N - address where the q format of log10N will be written.
+Note/Problem:
+For accurate results the input should be in normalized or near normalized form.
+*/
+void qm_log10(int32 N, int16 qN, int16 * log10N, int16 * qLog10N)
+{
+ int16 s16norm, s16tableIndex, s16errorApproximation;
+ uint16 u16offset;
+ int32 s32log;
+
+ /* Logarithm of negative values is undefined.
+ * assert N is greater than 0.
+ */
+ /* ASSERT(N > 0); */
+
+ /* normalize the N. */
+ s16norm = qm_norm32(N);
+ N = N << s16norm;
+
+ /* The qformat of N after normalization.
+ * -30 is added to treat the number as being between 1.0 and 2.0,
+ * i.e. after adding -30 to the q format the decimal point is
+ * just right of the MSB (i.e. after the sign bit and the first MSB),
+ * at the right side of the 30th bit.
+ */
+ qN = qN + s16norm - 30;
+
+ /* take the table index as the LOG2_LOG_TABLE_SIZE bits right of the MSB */
+ s16tableIndex = (int16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE)));
+
+ /* remove the MSB. the MSB is always 1 after normalization. */
+ s16tableIndex =
+ s16tableIndex & (int16) ((1 << LOG2_LOG_TABLE_SIZE) - 1);
+
+ /* remove the (1+LOG2_LOG_TABLE_SIZE) MSBs of N. */
+ N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1);
+
+ /* take the offset as the 16 MSBs after the table index.
+ */
+ u16offset = (uint16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16)));
+
+ /* look up the log value in the table. */
+ s32log = log_table[s16tableIndex]; /* q.15 format */
+
+ /* interpolate using the offset. */
+ s16errorApproximation = (int16) qm_mulu16(u16offset, (uint16) (log_table[s16tableIndex + 1] - log_table[s16tableIndex])); /* q.15 */
+
+ s32log = qm_add16((int16) s32log, s16errorApproximation); /* q.15 format */
+
+ /* adjust for the qformat of the N as
+ * log2(mag * 2^x) = log2(mag) + x
+ */
+ s32log = qm_add32(s32log, ((int32) - qN) << 15); /* q.15 format */
+
+ /* normalize the result. */
+ s16norm = qm_norm32(s32log);
+
+ /* bring all the important bits into lower 16 bits */
+ s32log = qm_shl32(s32log, s16norm - 16); /* q.15+s16norm-16 format */
+
+ /* compute the log10(N) by multiplying log2(N) with log10(2).
+ * as log10(mag * 2^x) = log2(mag * 2^x) * log10(2)
+ * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16)
+ */
+ *log10N = qm_muls16((int16) s32log, (int16) LOG10_2);
+
+ /* write the q format of the result. */
+ *qLog10N = 15 + s16norm - 16 + 1;
+
+ return;
+}
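/* Illustrative sketch (editor's example, not part of the patch): the caller
 * passes N with its q format and recovers log10(N * 2^-qN) as
 * log10N * 2^-qLog10N. For N = 1000 in q.0 the recovered value is close to
 * 3.0. Assumes qmath.h declarations and <math.h> for ldexp().
 */
#include <math.h>
#include "qmath.h"

void example_qm_log10(void)
{
	int16 mant, qfmt;
	qm_log10(1000, 0, &mant, &qfmt);
	double value = ldexp((double)mant, -qfmt);	/* ~= 3.0 */
	(void)value;
}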
+
+/*
+Description:
+This routine computes 1/N.
+It reformats the given number N as N * 2^qN, where N is between 0.5 and 1.0
+in q.15 format in 16 bits. The problem then boils down to finding the inverse of a
+16 bit q.15 number in the range 0.5 to 1.0, so the output is always between
+1.0 and 2.0 and is produced in q.30 format; the final output q format is then found
+by taking qN into account. The inverse is found with the Newton-Raphson method: the
+initial guess for the inverse (x) is 1/0.75 (with appropriate sign), and a new guess is
+calculated using the formula x' = 2*x - N*x*x. After 4 or 5 iterations x is very close to
+the inverse of N.
+Inputs:
+N - number whose reciprocal 1/N has to be found.
+qN - q format of N.
+result - address where 1/N has to be written.
+qResult - address where the q format of 1/N has to be written.
+*/
+#define qx 29
+void qm_1byN(int32 N, int16 qN, int32 * result, int16 * qResult)
+{
+ int16 normN;
+ int32 s32firstTerm, s32secondTerm, x;
+ int i;
+
+ normN = qm_norm32(N);
+
+ /* limit N to least significant 16 bits. 15th bit is the sign bit. */
+ N = qm_shl32(N, normN - 16);
+ qN = qN + normN - 16 - 15;
+ /* -15 is added to treat N as 16 bit q.15 number in the range from 0.5 to 1 */
+
+ /* Take the initial guess as 1/0.75 in qx format with appropriate sign. */
+ if (N >= 0) {
+ x = (int32) ((1 / 0.75) * (1 << qx));
+ /* input number is in the range 0.5 to 1.0, so 1/0.75 is taken as the initial guess. */
+ } else {
+ x = (int32) ((1 / -0.75) * (1 << qx));
+ /* input number is in the range -0.5 to -1.0, so 1/-0.75 is taken as the initial guess. */
+ }
+
+ /* iterate the equation x = 2*x - N*x*x for 4 times. */
+ for (i = 0; i < 4; i++) {
+ s32firstTerm = qm_shl32(x, 1); /* s32firstTerm = 2*x in q.29 */
+ s32secondTerm =
+ qm_muls321616((int16) (s32firstTerm >> 16),
+ (int16) (s32firstTerm >> 16));
+ /* s32secondTerm = x*x in q.(29+1-16)*2+1 */
+ s32secondTerm =
+ qm_muls321616((int16) (s32secondTerm >> 16), (int16) N);
+ /* s32secondTerm = N*x*x in q.((29+1-16)*2+1)-16+15+1 i.e. in q.29 */
+ x = qm_sub32(s32firstTerm, s32secondTerm);
+ /* can be added directly as both are in q.29 */
+ }
+
+ /* Bring the x to q.30 format. */
+ *result = qm_shl32(x, 1);
+ /* giving the output in q.30 format for q.15 input in 16 bits. */
+
+ /* compute the final q format of the result. */
+ *qResult = -qN + 30; /* adjusting the q format of actual output */
+
+ return;
+}
+
+#undef qx
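/* Illustrative sketch (editor's example, not part of the patch): qm_1byN()
 * returns the reciprocal as a 32 bit mantissa plus its q format, so
 * 1/N ~= result * 2^-qResult. For N = 5 in q.0 the recovered value is close
 * to 0.2. Assumes qmath.h declarations and <math.h> for ldexp().
 */
#include <math.h>
#include "qmath.h"

void example_qm_1byN(void)
{
	int32 recip;
	int16 qfmt;
	qm_1byN(5, 0, &recip, &qfmt);
	double value = ldexp((double)recip, -qfmt);	/* ~= 0.2 */
	(void)value;
}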
diff --git a/drivers/staging/brcm80211/util/siutils.c b/drivers/staging/brcm80211/util/siutils.c
new file mode 100644
index 000000000000..af35564eea78
--- /dev/null
+++ b/drivers/staging/brcm80211/util/siutils.c
@@ -0,0 +1,2914 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <siutils.h>
+#include <bcmdevs.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+#include <pci_core.h>
+#include <pcie_core.h>
+#include <nicpci.h>
+#include <bcmnvram.h>
+#include <bcmsrom.h>
+#include <hndtcam.h>
+#include <pcicfg.h>
+#include <sbsocram.h>
+#ifdef BCMSDIO
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbsdio.h>
+#include <sbhnddma.h>
+#include <sbsdpcmdev.h>
+#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
+#include <hndpmu.h>
+
+/* this file now contains only definitions for sb functions, which are only
+ * necessary for devices using Sonics backplanes (bcm4329)
+ */
+
+/* if an amba SDIO device is supported, please further restrict the inclusion
+ * of this file
+ */
+#ifdef BCMSDIO
+#include "siutils_priv.h"
+#endif
+
+/* local prototypes */
+static si_info_t *si_doattach(si_info_t * sii, uint devid, osl_t * osh,
+ void *regs, uint bustype, void *sdh, char **vars,
+ uint * varsz);
+static bool si_buscore_prep(si_info_t * sii, uint bustype, uint devid,
+ void *sdh);
+static bool si_buscore_setup(si_info_t * sii, chipcregs_t * cc, uint bustype,
+ uint32 savewin, uint * origidx, void *regs);
+static void si_nvram_process(si_info_t * sii, char *pvars);
+
+/* dev path concatenation util */
+static char *si_devpathvar(si_t * sih, char *var, int len, const char *name);
+static bool _si_clkctl_cc(si_info_t * sii, uint mode);
+static bool si_ispcie(si_info_t * sii);
+static uint BCMINITFN(socram_banksize) (si_info_t * sii, sbsocramregs_t * r,
+ uint8 idx, uint8 mtype);
+
+/* global variable to indicate reservation/release of gpio's */
+static uint32 si_gpioreservation = 0;
+
+/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+
+/*
+ * Allocate a si handle.
+ * devid - pci device id (used to determine chip#)
+ * osh - opaque OS handle
+ * regs - virtual address of initial core registers
+ * bustype - pci/sb/sdio/etc
+ * vars - pointer to a pointer area for "environment" variables
+ * varsz - pointer to int to return the size of the vars
+ */
+si_t *BCMATTACHFN(si_attach) (uint devid, osl_t * osh, void *regs,
+ uint bustype, void *sdh, char **vars,
+ uint * varsz) {
+ si_info_t *sii;
+
+ /* alloc si_info_t */
+ if ((sii = MALLOC(osh, sizeof(si_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n",
+ MALLOCED(osh)));
+ return (NULL);
+ }
+
+ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) ==
+ NULL) {
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->vars = vars ? *vars : NULL;
+ sii->varsz = varsz ? *varsz : 0;
+
+ return (si_t *) sii;
+}
+
+/* global kernel resource */
+static si_info_t ksii;
+
+static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
+
+static bool
+BCMATTACHFN(si_buscore_prep) (si_info_t * sii, uint bustype, uint devid,
+ void *sdh) {
+
+ /* kludge to enable the clock on the 4306 which lacks a slowclock */
+ if (BUSTYPE(bustype) == PCI_BUS && !si_ispcie(sii))
+ si_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
+
+#if defined(BCMSDIO)
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval =
+ bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval =
+ bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", clkval));
+ return FALSE;
+ }
+ clkset =
+ SBSDIO_FORCE_HW_CLKREQ_OFF |
+ SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0,
+ NULL);
+ }
+#endif /* defined(BCMSDIO) */
+
+ return TRUE;
+}
+
+static bool
+BCMATTACHFN(si_buscore_setup) (si_info_t * sii, chipcregs_t * cc, uint bustype,
+ uint32 savewin, uint * origidx, void *regs) {
+ bool pci, pcie;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr) cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+ /* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ /*
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+ */
+
+ /* figure out bus/original core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, sii->coresba[i], sii->regs[i]));
+
+ if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if (cid == PCIE_CORE_ID) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ }
+ }
+#ifdef BCMSDIO
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) || (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+#endif /* BCMSDIO */
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == sii->coresba[i])) ||
+ (regs == sii->regs[i]))
+ *origidx = i;
+ }
+
+ if (pci && pcie) {
+ if (si_ispcie(sii))
+ pci = FALSE;
+ else
+ pcie = FALSE;
+ }
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx,
+ sii->pub.buscoretype, sii->pub.buscorerev));
+
+ /* fixup necessary chip/core configurations */
+ if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ if (SI_FAST(sii)) {
+ if (!sii->pch &&
+ ((sii->pch =
+ (void *)(uintptr) pcicore_init(&sii->pub,
+ sii->osh,
+ (void *)
+ PCIEREGS(sii))) ==
+ NULL))
+ return FALSE;
+ }
+ if (si_pci_fixcfg(&sii->pub)) {
+ SI_ERROR(("si_doattach: sb_pci_fixcfg failed\n"));
+ return FALSE;
+ }
+ }
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+static void BCMATTACHFN(si_nvram_process) (si_info_t * sii, char *pvars) {
+ uint w = 0;
+
+ /* get boardtype and boardrev */
+ switch (BUSTYPE(sii->pub.bustype)) {
+ case PCI_BUS:
+ /* do a pci config read to get subsystem id and subvendor id */
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(uint32));
+ /* Let nvram variables override subsystem Vend/ID */
+ if ((sii->pub.boardvendor =
+ (uint16) si_getdevpathintvar(&sii->pub, "boardvendor"))
+ == 0)
+ sii->pub.boardvendor = w & 0xffff;
+ else
+ SI_ERROR(("Overriding boardvendor: 0x%x instead of 0x%x\n", sii->pub.boardvendor, w & 0xffff));
+ if ((sii->pub.boardtype =
+ (uint16) si_getdevpathintvar(&sii->pub, "boardtype"))
+ == 0)
+ sii->pub.boardtype = (w >> 16) & 0xffff;
+ else
+ SI_ERROR(("Overriding boardtype: 0x%x instead of 0x%x\n", sii->pub.boardtype, (w >> 16) & 0xffff));
+ break;
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+#endif
+ sii->pub.boardvendor = getintvar(pvars, "manfid");
+ sii->pub.boardtype = getintvar(pvars, "prodid");
+ break;
+
+#ifdef BCMSDIO
+ case SPI_BUS:
+ sii->pub.boardvendor = VENDOR_BROADCOM;
+ sii->pub.boardtype = SPI_BOARD;
+ break;
+#endif
+
+ case SI_BUS:
+ case JTAG_BUS:
+ sii->pub.boardvendor = VENDOR_BROADCOM;
+ if (pvars == NULL
+ || ((sii->pub.boardtype = getintvar(pvars, "prodid")) == 0))
+ if ((sii->pub.boardtype =
+ getintvar(NULL, "boardtype")) == 0)
+ sii->pub.boardtype = 0xffff;
+ break;
+ }
+
+ if (sii->pub.boardtype == 0) {
+ SI_ERROR(("si_doattach: unknown board type\n"));
+ ASSERT(sii->pub.boardtype);
+ }
+
+ sii->pub.boardflags = getintvar(pvars, "boardflags");
+}
+
+/* this will make Sonics calls directly, since Sonics is no longer supported in the Si abstraction */
+/* this has been customized for the bcm 4329 ONLY */
+#ifdef BCMSDIO
+static si_info_t *BCMATTACHFN(si_doattach) (si_info_t * sii, uint devid,
+ osl_t * osh, void *regs,
+ uint bustype, void *sdh,
+ char **vars, uint * varsz) {
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ bzero((uchar *) sii, sizeof(si_info_t));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+ /* find Chipcommon address */
+ cc = (chipcregs_t *) sii->curmap;
+ sih->bustype = bustype;
+
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
+ bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+ (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ }
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ /* SI_MSG(("Found chip type SB (0x%08x)\n", w)); */
+ sb_scan(&sii->pub, regs, devid);
+
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+ /* Init nvram from flash if it exists */
+ nvram_init((void *)&(sii->pub));
+
+ /* Init nvram from sprom/otp if they exist */
+ if (srom_var_init
+ (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+ SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
+ goto exit;
+ }
+ pvars = vars ? *vars : NULL;
+ si_nvram_process(sii, pvars);
+
+ /* === NVRAM, clock is ready === */
+
+ cc = (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(osh, &cc->gpiopullup, 0);
+ W_REG(osh, &cc->gpiopulldown, 0);
+ sb_setcoreidx(sih, origidx);
+
+ /* PMU specific initializations */
+ if (PMUCTL_ENAB(sih)) {
+ uint32 xtalfreq;
+ si_pmu_init(sih, sii->osh);
+ si_pmu_chip_init(sih, sii->osh);
+ xtalfreq = getintvar(pvars, "xtalfreq");
+ /* If xtalfreq var not available, try to measure it */
+ if (xtalfreq == 0)
+ xtalfreq = si_pmu_measure_alpclk(sih, sii->osh);
+ si_pmu_pll_init(sih, sii->osh, xtalfreq);
+ si_pmu_res_init(sih, sii->osh);
+ si_pmu_swreg_init(sih, sii->osh);
+ }
+
+ /* setup the GPIO based LED powersave register */
+ if ((w = getintvar(pvars, "leddc")) == 0)
+ w = DEFAULT_GPIOTIMERVAL;
+ sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
+
+#ifdef BCMDBG
+ /* clear any previous epidiag-induced target abort */
+ sb_taclear(sih, FALSE);
+#endif /* BCMDBG */
+
+ return (sii);
+
+ exit:
+ return NULL;
+}
+
+#else /* BCMSDIO */
+static si_info_t *BCMATTACHFN(si_doattach) (si_info_t * sii, uint devid,
+ osl_t * osh, void *regs,
+ uint bustype, void *sdh,
+ char **vars, uint * varsz) {
+ struct si_pub *sih = &sii->pub;
+ uint32 w, savewin;
+ chipcregs_t *cc;
+ char *pvars = NULL;
+ uint origidx;
+
+ ASSERT(GOODREGS(regs));
+
+ bzero((uchar *) sii, sizeof(si_info_t));
+
+ savewin = 0;
+
+ sih->buscoreidx = BADIDX;
+
+ sii->curmap = regs;
+ sii->sdh = sdh;
+ sii->osh = osh;
+
+ /* check to see if we are a si core mimicking a pci core */
+ if ((bustype == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) ==
+ 0xffffffff)) {
+ SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " "devid:0x%x\n", __func__, devid));
+ bustype = SI_BUS;
+ }
+
+ /* find Chipcommon address */
+ if (bustype == PCI_BUS) {
+ savewin =
+ OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+ cc = (chipcregs_t *) regs;
+ } else {
+ cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ }
+
+ sih->bustype = bustype;
+ if (bustype != BUSTYPE(bustype)) {
+ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
+ return NULL;
+ }
+
+ /* bus/core/clk setup for register access */
+ if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
+ bustype));
+ return NULL;
+ }
+
+ /* ChipID recognition.
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
+ w = R_REG(osh, &cc->chipid);
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as well fill in chip id, rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)(uintptr) cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+ /* assume current core is CC */
+ if ((sii->pub.ccrev == 0x25)
+ &&
+ ((CHIPID(sih->chip) == BCM43236_CHIP_ID
+ || CHIPID(sih->chip) == BCM43235_CHIP_ID
+ || CHIPID(sih->chip) == BCM43238_CHIP_ID)
+ && (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+ if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+ uint clkdiv;
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ /* otp_clk_div is even number, 120/14 < 9mhz */
+ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ SI_ERROR(("%s: set clkdiv to %x\n", __func__, clkdiv));
+ }
+ OSL_DELAY(10);
+ }
+
+ /* Init nvram from flash if it exists */
+ nvram_init((void *)&(sii->pub));
+
+ /* Init nvram from sprom/otp if they exist */
+ if (srom_var_init
+ (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+ SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
+ goto exit;
+ }
+ pvars = vars ? *vars : NULL;
+ si_nvram_process(sii, pvars);
+
+ /* === NVRAM, clock is ready === */
+ cc = (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0);
+ W_REG(osh, &cc->gpiopullup, 0);
+ W_REG(osh, &cc->gpiopulldown, 0);
+ si_setcoreidx(sih, origidx);
+
+ /* PMU specific initializations */
+ if (PMUCTL_ENAB(sih)) {
+ uint32 xtalfreq;
+ si_pmu_init(sih, sii->osh);
+ si_pmu_chip_init(sih, sii->osh);
+ xtalfreq = getintvar(pvars, "xtalfreq");
+ /* If xtalfreq var not available, try to measure it */
+ if (xtalfreq == 0)
+ xtalfreq = si_pmu_measure_alpclk(sih, sii->osh);
+ si_pmu_pll_init(sih, sii->osh, xtalfreq);
+ si_pmu_res_init(sih, sii->osh);
+ si_pmu_swreg_init(sih, sii->osh);
+ }
+
+ /* setup the GPIO based LED powersave register */
+ if ((w = getintvar(pvars, "leddc")) == 0)
+ w = DEFAULT_GPIOTIMERVAL;
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w);
+
+ if (PCIE(sii)) {
+ ASSERT(sii->pch != NULL);
+ pcicore_attach(sii->pch, pvars, SI_DOATTACH);
+ }
+
+ if ((CHIPID(sih->chip) == BCM43224_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43421_CHIP_ID)) {
+ /* enable 12 mA drive strength for 43224 and set chipControl register bit 15 */
+ if (CHIPREV(sih->chiprev) == 0) {
+ SI_MSG(("Applying 43224A0 WARs\n"));
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
+ si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
+ CCTRL_43224A0_12MA_LED_DRIVE);
+ }
+ if (CHIPREV(sih->chiprev) >= 1) {
+ SI_MSG(("Applying 43224B0+ WARs\n"));
+ si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
+ CCTRL_43224B0_12MA_LED_DRIVE);
+ }
+ }
+
+ if (CHIPID(sih->chip) == BCM4313_CHIP_ID) {
+ /* enable 12 mA drive strength for 4313 and set chipControl register bit 1 */
+ SI_MSG(("Applying 4313 WARs\n"));
+ si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
+ CCTRL_4313_12MA_LED_DRIVE);
+ }
+
+ if (CHIPID(sih->chip) == BCM4331_CHIP_ID) {
+ /* Enable Ext PA lines depending on chip package option */
+ si_chipcontrl_epa4331(sih, TRUE);
+ }
+
+ return (sii);
+ exit:
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+ }
+
+ return NULL;
+}
+#endif /* BCMSDIO */
+
+/* may be called with core in reset */
+void BCMATTACHFN(si_detach) (si_t * sih) {
+ si_info_t *sii;
+ uint idx;
+
+ struct si_pub *si_local = NULL;
+ bcopy(&sih, &si_local, sizeof(si_t **));
+
+ sii = SI_INFO(sih);
+
+ if (sii == NULL)
+ return;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (sii->regs[idx]) {
+ REG_UNMAP(sii->regs[idx]);
+ sii->regs[idx] = NULL;
+ }
+
+ nvram_exit((void *)si_local); /* free up nvram buffers */
+
+ if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ if (sii->pch)
+ pcicore_deinit(sii->pch);
+ sii->pch = NULL;
+ }
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *si_osh(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void si_setosh(si_t * sih, osl_t * osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/* register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t * sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t) intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t) intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t) intrsenabled_fn;
+ /* save the current core id. when this function is called, the current core
+ * must be the core which provides the driver functions (il, et, wl, etc.)
+ */
+ sii->dev_coreid = sii->coreid[sii->curidx];
+}
+
+void si_deregister_intr_callback(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ sii->intrsoff_fn = NULL;
+}
+
+uint si_intflag(si_t * sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return R_REG(sii->osh,
+ ((uint32 *) (uintptr) (sii->oob_router +
+ OOB_STATUSA)));
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint si_flag(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_flag(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void si_setint(si_t * sih, int siflag)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_setint(sih, siflag);
+ else
+ ASSERT(0);
+}
+
+#ifndef BCMSDIO
+uint si_coreid(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->coreid[sii->curidx];
+}
+#endif
+
+uint si_coreidx(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->curidx;
+}
+
+/* return the core-type instantiation # of the current core */
+uint si_coreunit(si_t * sih)
+{
+ si_info_t *sii;
+ uint idx;
+ uint coreid;
+ uint coreunit;
+ uint i;
+
+ sii = SI_INFO(sih);
+ coreunit = 0;
+
+ idx = sii->curidx;
+
+ ASSERT(GOODREGS(sii->curmap));
+ coreid = si_coreid(sih);
+
+ /* count the cores of our type */
+ for (i = 0; i < idx; i++)
+ if (sii->coreid[i] == coreid)
+ coreunit++;
+
+ return (coreunit);
+}
+
+uint si_corevendor(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corevendor(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool si_backplane64(si_t * sih)
+{
+ return ((sih->cccaps & CC_CAP_BKPLN64) != 0);
+}
+
+#ifndef BCMSDIO
+uint si_corerev(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corerev(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+#endif
+
+/* return index of coreid or BADIDX if not found */
+uint si_findcoreidx(si_t * sih, uint coreid, uint coreunit)
+{
+ si_info_t *sii;
+ uint found;
+ uint i;
+
+ sii = SI_INFO(sih);
+
+ found = 0;
+
+ for (i = 0; i < sii->numcores; i++)
+ if (sii->coreid[i] == coreid) {
+ if (found == coreunit)
+ return (i);
+ found++;
+ }
+
+ return (BADIDX);
+}
+
+/* return list of found cores */
+uint si_corelist(si_t * sih, uint coreid[])
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ bcopy((uchar *) sii->coreid, (uchar *) coreid,
+ (sii->numcores * sizeof(uint)));
+ return (sii->numcores);
+}
+
+/* return current register mapping */
+void *si_coreregs(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+
+ return (sii->curmap);
+}
+
+/*
+ * This function changes logical "focus" to the indicated core;
+ * must be called with interrupts off.
+ * Moreover, callers should keep interrupts off during switching out of and back to d11 core
+ */
+void *si_setcore(si_t * sih, uint coreid, uint coreunit)
+{
+ uint idx;
+
+ idx = si_findcoreidx(sih, coreid, coreunit);
+ if (!GOODIDX(idx))
+ return (NULL);
+
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, idx);
+ else {
+#ifdef BCMSDIO
+ return sb_setcoreidx(sih, idx);
+#else
+ ASSERT(0);
+ return NULL;
+#endif
+ }
+}
+
+#ifndef BCMSDIO
+void *si_setcoreidx(si_t * sih, uint coreidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_setcoreidx(sih, coreidx);
+ else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+#endif
+
+/* Turn off interrupt as required by sb_setcore, before switch core */
+void *si_switch_core(si_t * sih, uint coreid, uint * origidx, uint * intr_val)
+{
+ void *cc;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (SI_FAST(sii)) {
+ /* Overloading the origidx variable to remember the coreid,
+ * this works because the core ids cannot be confused with
+ * core indices.
+ */
+ *origidx = coreid;
+ if (coreid == CC_CORE_ID)
+ return (void *)CCREGS_FAST(sii);
+ else if (coreid == sih->buscoretype)
+ return (void *)PCIEREGS(sii);
+ }
+ INTR_OFF(sii, *intr_val);
+ *origidx = sii->curidx;
+ cc = si_setcore(sih, coreid, 0);
+ ASSERT(cc != NULL);
+
+ return cc;
+}
+
+/* restore coreidx and restore interrupt */
+void si_restore_core(si_t * sih, uint coreid, uint intr_val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (SI_FAST(sii)
+ && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
+ return;
+
+ si_setcoreidx(sih, coreid);
+ INTR_RESTORE(sii, intr_val);
+}
+
+int si_numaddrspaces(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_numaddrspaces(sih);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32 si_addrspace(si_t * sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspace(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32 si_addrspacesize(si_t * sih, uint asidx)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_addrspacesize(sih, asidx);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+uint32 si_core_cflags(si_t * sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_cflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+void si_core_cflags_wo(si_t * sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_cflags_wo(sih, mask, val);
+ else
+ ASSERT(0);
+}
+
+uint32 si_core_sflags(si_t * sih, uint32 mask, uint32 val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_core_sflags(sih, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+bool si_iscoreup(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_iscoreup(sih);
+ else {
+#ifdef BCMSDIO
+ return sb_iscoreup(sih);
+#else
+ ASSERT(0);
+ return FALSE;
+#endif
+ }
+}
+
+void si_write_wrapperreg(si_t * sih, uint32 offset, uint32 val)
+{
+ /* only for 4319, no requirement for SOCI_SB */
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ ai_write_wrap_reg(sih, offset, val);
+ }
+}
+
+uint si_corereg(si_t * sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else {
+#ifdef BCMSDIO
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+#else
+ ASSERT(0);
+ return 0;
+#endif
+ }
+}
+
+void si_core_disable(si_t * sih, uint32 bits)
+{
+
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_disable(sih, bits);
+#ifdef BCMSDIO
+ else
+ sb_core_disable(sih, bits);
+#endif
+}
+
+void si_core_reset(si_t * sih, uint32 bits, uint32 resetbits)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI)
+ ai_core_reset(sih, bits, resetbits);
+#ifdef BCMSDIO
+ else
+ sb_core_reset(sih, bits, resetbits);
+#endif
+}
+
+/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+int si_corebist(si_t * sih)
+{
+ uint32 cflags;
+ int result = 0;
+
+ /* Read core control flags */
+ cflags = si_core_cflags(sih, 0, 0);
+
+ /* Set bist & fgc */
+ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));
+
+ /* Wait for bist done */
+ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);
+
+ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
+ result = BCME_ERROR;
+
+ /* Reset core control flags */
+ si_core_cflags(sih, 0xffff, cflags);
+
+ return result;
+}
+
+static uint32 BCMINITFN(factor6) (uint32 x) {
+ switch (x) {
+ case CC_F6_2:
+ return 2;
+ case CC_F6_3:
+ return 3;
+ case CC_F6_4:
+ return 4;
+ case CC_F6_5:
+ return 5;
+ case CC_F6_6:
+ return 6;
+ case CC_F6_7:
+ return 7;
+ default:
+ return 0;
+ }
+}
+
+/* calculate the speed the SI would run at given a set of clockcontrol values */
+uint32 BCMINITFN(si_clock_rate) (uint32 pll_type, uint32 n, uint32 m) {
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25Mhz) */
+ if ((pll_type == PLL_TYPE3) || (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) || (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS:
+ return (clock);
+ case CC_MC_M1:
+ return (clock / m1);
+ case CC_MC_M1M2:
+ return (clock / (m1 * m2));
+ case CC_MC_M1M2M3:
+ return (clock / (m1 * m2 * m3));
+ case CC_MC_M1M3:
+ return (clock / (m1 * m3));
+ default:
+ return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+}
+
+uint32 BCMINITFN(si_clock) (si_t * sih) {
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint32 n, m;
+ uint idx;
+ uint32 pll_type, rate;
+ uint intr_val = 0;
+
+ sii = SI_INFO(sih);
+ INTR_OFF(sii, intr_val);
+ if (PMUCTL_ENAB(sih)) {
+ rate = si_pmu_si_clock(sih, sii->osh);
+ goto exit;
+ }
+
+ idx = sii->curidx;
+ cc = (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ n = R_REG(sii->osh, &cc->clockcontrol_n);
+ pll_type = sih->cccaps & CC_CAP_PLL_MASK;
+ if (pll_type == PLL_TYPE6)
+ m = R_REG(sii->osh, &cc->clockcontrol_m3);
+ else if (pll_type == PLL_TYPE3)
+ m = R_REG(sii->osh, &cc->clockcontrol_m2);
+ else
+ m = R_REG(sii->osh, &cc->clockcontrol_sb);
+
+ /* calculate rate */
+ rate = si_clock_rate(pll_type, n, m);
+
+ if (pll_type == PLL_TYPE3)
+ rate = rate / 2;
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ exit:
+ INTR_RESTORE(sii, intr_val);
+
+ return rate;
+}
+
+uint32 BCMINITFN(si_alp_clock) (si_t * sih) {
+ if (PMUCTL_ENAB(sih))
+ return si_pmu_alp_clock(sih, si_osh(sih));
+
+ return ALP_CLOCK;
+}
+
+uint32 BCMINITFN(si_ilp_clock) (si_t * sih) {
+ if (PMUCTL_ENAB(sih))
+ return si_pmu_ilp_clock(sih, si_osh(sih));
+
+ return ILP_CLOCK;
+}
+
+/* set chip watchdog reset timer to fire in 'ticks' */
+void si_watchdog(si_t * sih, uint ticks)
+{
+ uint nb, maxt;
+
+ if (PMUCTL_ENAB(sih)) {
+
+ if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
+ (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+ si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
+ si_setcore(sih, USB20D_CORE_ID, 0);
+ si_core_disable(sih, 1);
+ si_setcore(sih, CC_CORE_ID, 0);
+ }
+
+ nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
+ /* The mips compiler uses the sllv instruction,
+ * so we specially handle the 32-bit case.
+ */
+ if (nb == 32)
+ maxt = 0xffffffff;
+ else
+ maxt = ((1 << nb) - 1);
+
+ if (ticks == 1)
+ ticks = 2;
+ else if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog),
+ ~0, ticks);
+ } else {
+ /* make sure we come up in fast clock mode; or if clearing, clear clock */
+ si_clkctl_cc(sih, ticks ? CLK_FAST : CLK_DYNAMIC);
+ maxt = (1 << 28) - 1;
+ if (ticks > maxt)
+ ticks = maxt;
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0,
+ ticks);
+ }
+}
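/* Illustrative sketch (editor's example, not part of the patch): the PMU
 * watchdog counter is 16, 24 or 32 bits wide depending on the chipcommon
 * revision, and the requested tick count is clamped to what the counter can
 * hold; 1 << 32 would be undefined, hence the explicit 32 bit case above.
 * wd_max_ticks() is a hypothetical helper mirroring that clamp; it assumes
 * the driver's uint/uint32 typedefs (typedefs.h).
 */
static uint32 wd_max_ticks(uint nb)
{
	return (nb == 32) ? 0xffffffff : ((1u << nb) - 1);
}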
+
+/* trigger watchdog reset after ms milliseconds */
+void si_watchdog_ms(si_t * sih, uint32 ms)
+{
+ si_watchdog(sih, wd_msticks * ms);
+}
+
+uint16 BCMATTACHFN(si_d11_devid) (si_t * sih) {
+ si_info_t *sii = SI_INFO(sih);
+ uint16 device;
+
+ /* normal case: nvram variable with devpath->devid->wl0id */
+ if ((device = (uint16) si_getdevpathintvar(sih, "devid")) != 0) ;
+ /* Get devid from OTP/SPROM depending on where the SROM is read */
+ else if ((device = (uint16) getintvar(sii->vars, "devid")) != 0) ;
+ /* no longer support wl0id, but keep the code here for backward compatibility. */
+ else if ((device = (uint16) getintvar(sii->vars, "wl0id")) != 0) ;
+ else
+ /* ignore it */
+ device = 0xffff;
+
+ return device;
+}
+
+/* return the slow clock source - LPO, XTAL, or PCI */
+static uint si_slowclk_src(si_info_t * sii)
+{
+ chipcregs_t *cc;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ if (sii->pub.ccrev < 6) {
+ if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32))
+ & PCI_CFG_GPIO_SCS))
+ return (SCC_SS_PCI);
+ else
+ return (SCC_SS_XTAL);
+ } else if (sii->pub.ccrev < 10) {
+ cc = (chipcregs_t *) si_setcoreidx(&sii->pub, sii->curidx);
+ return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
+ } else /* Insta-clock */
+ return (SCC_SS_XTAL);
+}
+
+/* return the ILP (slowclock) min or max frequency */
+static uint si_slowclk_freq(si_info_t * sii, bool max_freq, chipcregs_t * cc)
+{
+ uint32 slowclk;
+ uint div;
+
+ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
+
+ /* shouldn't be here unless we've established the chip has dynamic clk control */
+ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);
+
+ slowclk = si_slowclk_src(sii);
+ if (sii->pub.ccrev < 6) {
+ if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / 64)
+ : (PCIMINFREQ / 64));
+ else
+ return (max_freq ? (XTALMAXFREQ / 32)
+ : (XTALMINFREQ / 32));
+ } else if (sii->pub.ccrev < 10) {
+ div = 4 *
+ (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >>
+ SCC_CD_SHIFT) + 1);
+ if (slowclk == SCC_SS_LPO)
+ return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
+ else if (slowclk == SCC_SS_XTAL)
+ return (max_freq ? (XTALMAXFREQ / div)
+ : (XTALMINFREQ / div));
+ else if (slowclk == SCC_SS_PCI)
+ return (max_freq ? (PCIMAXFREQ / div)
+ : (PCIMINFREQ / div));
+ else
+ ASSERT(0);
+ } else {
+ /* Chipc rev 10 is InstaClock */
+ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
+ div = 4 * (div + 1);
+ return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
+ }
+ return (0);
+}
+
+static void BCMINITFN(si_clkctl_setdelay) (si_info_t * sii, void *chipcregs) {
+ chipcregs_t *cc = (chipcregs_t *) chipcregs;
+ uint slowmaxfreq, pll_delay, slowclk;
+ uint pll_on_delay, fref_sel_delay;
+
+ pll_delay = PLL_DELAY;
+
+ /* If the slow clock is not sourced by the xtal then add the xtal_on_delay
+ * since the xtal will also be powered down by dynamic clk control logic.
+ */
+
+ slowclk = si_slowclk_src(sii);
+ if (slowclk != SCC_SS_XTAL)
+ pll_delay += XTAL_ON_DELAY;
+
+ /* Starting with 4318 it is ILP that is used for the delays */
+ slowmaxfreq =
+ si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);
+
+ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
+ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
+
+ W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
+ W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
+}
+
+/* initialize power control delay registers */
+void BCMINITFN(si_clkctl_init) (si_t * sih) {
+ si_info_t *sii;
+ uint origidx = 0;
+ chipcregs_t *cc;
+ bool fast;
+
+ if (!CCCTL_ENAB(sih))
+ return;
+
+ sii = SI_INFO(sih);
+ fast = SI_FAST(sii);
+ if (!fast) {
+ origidx = sii->curidx;
+ if ((cc =
+ (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ return;
+ } else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL)
+ return;
+ ASSERT(cc != NULL);
+
+ /* set all Instaclk chip ILP to 1 MHz */
+ if (sih->ccrev >= 10)
+ SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+
+ si_clkctl_setdelay(sii, (void *)(uintptr) cc);
+
+ if (!fast)
+ si_setcoreidx(sih, origidx);
+}
+
+/* return the value suitable for writing to the dot11 core FAST_PWRUP_DELAY register */
+uint16 BCMINITFN(si_clkctl_fast_pwrup_delay) (si_t * sih) {
+ si_info_t *sii;
+ uint origidx = 0;
+ chipcregs_t *cc;
+ uint slowminfreq;
+ uint16 fpdelay;
+ uint intr_val = 0;
+ bool fast;
+
+ sii = SI_INFO(sih);
+ if (PMUCTL_ENAB(sih)) {
+ INTR_OFF(sii, intr_val);
+ fpdelay = si_pmu_fast_pwrup_delay(sih, sii->osh);
+ INTR_RESTORE(sii, intr_val);
+ return fpdelay;
+ }
+
+ if (!CCCTL_ENAB(sih))
+ return 0;
+
+ fast = SI_FAST(sii);
+ fpdelay = 0;
+ if (!fast) {
+ origidx = sii->curidx;
+ INTR_OFF(sii, intr_val);
+ if ((cc =
+ (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0)) == NULL)
+ goto done;
+ } else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL)
+ goto done;
+ ASSERT(cc != NULL);
+
+ slowminfreq = si_slowclk_freq(sii, FALSE, cc);
+ fpdelay = (((R_REG(sii->osh, &cc->pll_on_delay) + 2) * 1000000) +
+ (slowminfreq - 1)) / slowminfreq;
+
+ done:
+ if (!fast) {
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ }
+ return fpdelay;
+}
+
+/* turn primary xtal and/or pll off/on */
+int si_clkctl_xtal(si_t * sih, uint what, bool on)
+{
+ si_info_t *sii;
+ uint32 in, out, outen;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ return (-1);
+#endif /* BCMSDIO */
+
+ case PCI_BUS:
+ /* pcie core doesn't have any mapping to control the xtal pu */
+ if (PCIE(sii))
+ return -1;
+
+ in = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_IN, sizeof(uint32));
+ out =
+ OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32));
+ outen =
+ OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN,
+ sizeof(uint32));
+
+ /*
+ * Avoid glitching the clock if GPRS is already using it.
+ * We can't actually read the state of the PLLPD so we infer it
+ * by the value of XTAL_PU which *is* readable via gpioin.
+ */
+ if (on && (in & PCI_CFG_GPIO_XTAL))
+ return (0);
+
+ if (what & XTAL)
+ outen |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ outen |= PCI_CFG_GPIO_PLL;
+
+ if (on) {
+ /* turn primary xtal on */
+ if (what & XTAL) {
+ out |= PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
+ sizeof(uint32), out);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
+ sizeof(uint32), outen);
+ OSL_DELAY(XTAL_ON_DELAY);
+ }
+
+ /* turn pll on */
+ if (what & PLL) {
+ out &= ~PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
+ sizeof(uint32), out);
+ OSL_DELAY(2000);
+ }
+ } else {
+ if (what & XTAL)
+ out &= ~PCI_CFG_GPIO_XTAL;
+ if (what & PLL)
+ out |= PCI_CFG_GPIO_PLL;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
+ sizeof(uint32), out);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
+ sizeof(uint32), outen);
+ }
+
+ default:
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * clock control policy function through chipcommon
+ *
+ * set dynamic clk control mode (forceslow, forcefast, dynamic)
+ * returns true if we are forcing fast clock
+ * this is a wrapper over the next internal function
+ * to allow flexible policy settings for outside caller
+ */
+bool si_clkctl_cc(si_t * sih, uint mode)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (sih->ccrev < 6)
+ return FALSE;
+
+ if (PCI_FORCEHT(sii))
+ return (mode == CLK_FAST);
+
+ return _si_clkctl_cc(sii, mode);
+}
+
+/* clk control mechanism through chipcommon, no policy checking */
+static bool _si_clkctl_cc(si_info_t * sii, uint mode)
+{
+ uint origidx = 0;
+ chipcregs_t *cc;
+ uint32 scc;
+ uint intr_val = 0;
+ bool fast = SI_FAST(sii);
+
+ /* chipcommon cores prior to rev6 don't support dynamic clock control */
+ if (sii->pub.ccrev < 6)
+ return (FALSE);
+
+ /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */
+ ASSERT(sii->pub.ccrev != 10);
+
+ if (!fast) {
+ INTR_OFF(sii, intr_val);
+ origidx = sii->curidx;
+
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
+ si_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
+ (si_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
+ goto done;
+
+ cc = (chipcregs_t *) si_setcore(&sii->pub, CC_CORE_ID, 0);
+ } else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL)
+ goto done;
+ ASSERT(cc != NULL);
+
+ if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20))
+ goto done;
+
+ switch (mode) {
+ case CLK_FAST: /* FORCEHT, fast (pll) clock */
+ if (sii->pub.ccrev < 10) {
+ /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */
+ si_clkctl_xtal(&sii->pub, XTAL, ON);
+ SET_REG(sii->osh, &cc->slow_clk_ctl,
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (sii->pub.ccrev < 20) {
+ OR_REG(sii->osh, &cc->system_clk_ctl, SYCC_HR);
+ } else {
+ OR_REG(sii->osh, &cc->clk_ctl_st, CCS_FORCEHT);
+ }
+
+ /* wait for the PLL */
+ if (PMUCTL_ENAB(&sii->pub)) {
+ uint32 htavail = CCS_HTAVAIL;
+ SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail)
+ == 0), PMU_MAX_TRANSITION_DLY);
+ ASSERT(R_REG(sii->osh, &cc->clk_ctl_st) & htavail);
+ } else {
+ OSL_DELAY(PLL_DELAY);
+ }
+ break;
+
+ case CLK_DYNAMIC: /* enable dynamic clock control */
+ if (sii->pub.ccrev < 10) {
+ scc = R_REG(sii->osh, &cc->slow_clk_ctl);
+ scc &= ~(SCC_FS | SCC_IP | SCC_XC);
+ if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
+ scc |= SCC_XC;
+ W_REG(sii->osh, &cc->slow_clk_ctl, scc);
+
+ /* for dynamic control, we have to release our xtal_pu "force on" */
+ if (scc & SCC_XC)
+ si_clkctl_xtal(&sii->pub, XTAL, OFF);
+ } else if (sii->pub.ccrev < 20) {
+ /* Instaclock */
+ AND_REG(sii->osh, &cc->system_clk_ctl, ~SYCC_HR);
+ } else {
+ AND_REG(sii->osh, &cc->clk_ctl_st, ~CCS_FORCEHT);
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ }
+
+ done:
+ if (!fast) {
+ si_setcoreidx(&sii->pub, origidx);
+ INTR_RESTORE(sii, intr_val);
+ }
+ return (mode == CLK_FAST);
+}
+
+/* Build device path. Support SI, PCI, and JTAG for now. */
+int BCMATTACHFN(si_devpath) (si_t * sih, char *path, int size) {
+ int slen;
+
+ ASSERT(path != NULL);
+ ASSERT(size >= SI_DEVPATH_BUFSZ);
+
+ if (!path || size <= 0)
+ return -1;
+
+ switch (BUSTYPE(sih->bustype)) {
+ case SI_BUS:
+ case JTAG_BUS:
+ slen = snprintf(path, (size_t) size, "sb/%u/", si_coreidx(sih));
+ break;
+ case PCI_BUS:
+ ASSERT((SI_INFO(sih))->osh != NULL);
+ slen = snprintf(path, (size_t) size, "pci/%u/%u/",
+ OSL_PCI_BUS((SI_INFO(sih))->osh),
+ OSL_PCI_SLOT((SI_INFO(sih))->osh));
+ break;
+
+#ifdef BCMSDIO
+ case SDIO_BUS:
+ SI_ERROR(("si_devpath: device 0 assumed\n"));
+ slen = snprintf(path, (size_t) size, "sd/%u/", si_coreidx(sih));
+ break;
+#endif
+ default:
+ slen = -1;
+ ASSERT(0);
+ break;
+ }
+
+ if (slen < 0 || slen >= size) {
+ path[0] = '\0';
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Get a variable, but only if it has a devpath prefix */
+char *BCMATTACHFN(si_getdevpathvar) (si_t * sih, const char *name) {
+ char varname[SI_DEVPATH_BUFSZ + 32];
+
+ si_devpathvar(sih, varname, sizeof(varname), name);
+
+ return (getvar(NULL, varname));
+}
+
+/* Get a variable, but only if it has a devpath prefix */
+int BCMATTACHFN(si_getdevpathintvar) (si_t * sih, const char *name) {
+#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
+ return (getintvar(NULL, name));
+#else
+ char varname[SI_DEVPATH_BUFSZ + 32];
+
+ si_devpathvar(sih, varname, sizeof(varname), name);
+
+ return (getintvar(NULL, varname));
+#endif
+}
+
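+/* Get a variable from NVRAM without applying a devpath prefix */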
+char *si_getnvramflvar(si_t * sih, const char *name)
+{
+ return (getvar(NULL, name));
+}
+
+/* Concatenate the dev path with a varname into the given 'var' buffer
+ * and return the 'var' pointer.
+ * Nothing is done to the arguments if len == 0 or var is NULL; var is still returned.
+ * On overflow, the first char will be set to '\0'.
+ */
+static char *BCMATTACHFN(si_devpathvar) (si_t * sih, char *var, int len,
+ const char *name) {
+ uint path_len;
+
+ if (!var || len <= 0)
+ return var;
+
+ if (si_devpath(sih, var, len) == 0) {
+ path_len = strlen(var);
+
+ if (strlen(name) + 1 > (uint) (len - path_len))
+ var[0] = '\0';
+ else
+ strncpy(var + path_len, name, len - path_len - 1);
+ }
+
+ return var;
+}
+
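+/* mask&set a pcie core register through the pcicore layer */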
+uint32 si_pciereg(si_t * sih, uint32 offset, uint32 mask, uint32 val, uint type)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii)) {
+ SI_ERROR(("%s: Not a PCIE device\n", __func__));
+ return 0;
+ }
+
+ return pcicore_pciereg(sii->pch, offset, mask, val, type);
+}
+
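+/* mask&set a pcie serdes register (via mdio) through the pcicore layer */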
+uint32
+si_pcieserdesreg(si_t * sih, uint32 mdioslave, uint32 offset, uint32 mask,
+ uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii)) {
+ SI_ERROR(("%s: Not a PCIE device\n", __func__));
+ return 0;
+ }
+
+ return pcicore_pcieserdesreg(sii->pch, mdioslave, offset, mask, val);
+
+}
+
+/* return TRUE if PCIE capability exists in the pci config space */
+static bool si_ispcie(si_info_t * sii)
+{
+ uint8 cap_ptr;
+
+ if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ return FALSE;
+
+ cap_ptr =
+ pcicore_find_pci_capability(sii->osh, PCI_CAP_PCIECAP_ID, NULL,
+ NULL);
+ if (!cap_ptr)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Wake-on-wireless-LAN (WOWL) support functions */
+/* Enable PME generation and disable clkreq */
+void si_pci_pmeen(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ pcicore_pmeen(sii->pch);
+}
+
+/* Return TRUE if PME status is set */
+bool si_pci_pmestat(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return pcicore_pmestat(sii->pch);
+}
+
+/* Disable PME generation, clear the PME status bit if set */
+void si_pci_pmeclr(si_t * sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ pcicore_pmeclr(sii->pch);
+}
+
+#ifdef BCMSDIO
+/* initialize the sdio core */
+void si_sdio_init(si_t * sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ if (((sih->buscoretype == PCMCIA_CORE_ID) && (sih->buscorerev >= 8)) ||
+ (sih->buscoretype == SDIOD_CORE_ID)) {
+ uint idx;
+ sdpcmd_regs_t *sdpregs;
+
+ /* get the current core index */
+ idx = sii->curidx;
+ ASSERT(idx == si_findcoreidx(sih, D11_CORE_ID, 0));
+
+ /* switch to sdio core */
+		sdpregs = (sdpcmd_regs_t *)
+		    si_setcore(sih, PCMCIA_CORE_ID, 0);
+		if (sdpregs == NULL)
+			sdpregs = (sdpcmd_regs_t *)
+			    si_setcore(sih, SDIOD_CORE_ID, 0);
+ ASSERT(sdpregs);
+
+		SI_MSG(("si_sdio_init: For PCMCIA/SDIO Corerev %d, "
+			"enable ints from core %d through SD core %d (%p)\n",
+			sih->buscorerev, idx, sii->curidx, sdpregs));
+
+ /* enable backplane error and core interrupts */
+ W_REG(sii->osh, &sdpregs->hostintmask, I_SBINT);
+ W_REG(sii->osh, &sdpregs->sbintmask,
+ (I_SB_SERR | I_SB_RESPERR | (1 << idx)));
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ }
+
+ /* enable interrupts */
+ bcmsdh_intr_enable(sii->sdh);
+
+}
+#endif /* BCMSDIO */
+
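+/* Return TRUE if the war16165 PCI workaround applies (pci core rev <= 10) */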
+bool BCMATTACHFN(si_pci_war16165) (si_t * sih) {
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ return (PCI(sii) && (sih->buscorerev <= 10));
+}
+
+/* Disable pcie_war_ovr for some platforms (sigh!)
+ * This is for boards that have BFL2_PCIEWAR_OVR set
+ * but are in systems that still want the benefits of ASPM
+ * Note that this should be done AFTER si_doattach
+ */
+void si_pcie_war_ovr_update(si_t * sih, uint8 aspm)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return;
+
+ pcie_war_ovr_aspm_update(sii->pch, aspm);
+}
+
+/* back door for other modules to override chippkg */
+void si_chippkg_set(si_t * sih, uint val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ sii->pub.chippkg = val;
+}
+
+void BCMINITFN(si_pci_up) (si_t * sih) {
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* if not pci bus, we're done */
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ if (PCI_FORCEHT(sii))
+ _si_clkctl_cc(sii, CLK_FAST);
+
+ if (PCIE(sii))
+ pcicore_up(sii->pch, SI_PCIUP);
+
+}
+
+/* Unconfigure and/or apply various WARs when system is going to sleep mode */
+void BCMUNINITFN(si_pci_sleep) (si_t * sih) {
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ pcicore_sleep(sii->pch);
+}
+
+/* Unconfigure and/or apply various WARs when going down */
+void BCMINITFN(si_pci_down) (si_t * sih) {
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ /* if not pci bus, we're done */
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ /* release FORCEHT since chip is going to "down" state */
+ if (PCI_FORCEHT(sii))
+ _si_clkctl_cc(sii, CLK_DYNAMIC);
+
+ pcicore_down(sii->pch, SI_PCIDOWN);
+}
+
+/*
+ * Configure the pci core for pci client (NIC) action
+ * coremask is the bitvec of cores by index to be enabled.
+ */
+void BCMATTACHFN(si_pci_setup) (si_t * sih, uint coremask) {
+ si_info_t *sii;
+ sbpciregs_t *pciregs = NULL;
+ uint32 siflag = 0, w;
+ uint idx = 0;
+
+ sii = SI_INFO(sih);
+
+ if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+ return;
+
+ ASSERT(PCI(sii) || PCIE(sii));
+ ASSERT(sii->pub.buscoreidx != BADIDX);
+
+ if (PCI(sii)) {
+ /* get current core index */
+ idx = sii->curidx;
+
+ /* we interrupt on this backplane flag number */
+ siflag = si_flag(sih);
+
+ /* switch over to pci core */
+ pciregs =
+ (sbpciregs_t *) si_setcoreidx(sih, sii->pub.buscoreidx);
+ }
+
+ /*
+ * Enable sb->pci interrupts. Assume
+ * PCI rev 2.3 support was added in pci core rev 6 and things changed..
+ */
+ if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ /* pci config write to set this core bit in PCIIntMask */
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32));
+ w |= (coremask << PCI_SBIM_SHIFT);
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_MASK, sizeof(uint32), w);
+ } else {
+ /* set sbintvec bit for our flag number */
+ si_setint(sih, siflag);
+ }
+
+ if (PCI(sii)) {
+ OR_REG(sii->osh, &pciregs->sbtopci2,
+ (SBTOPCI_PREF | SBTOPCI_BURST));
+ if (sii->pub.buscorerev >= 11) {
+ OR_REG(sii->osh, &pciregs->sbtopci2,
+ SBTOPCI_RC_READMULTI);
+ w = R_REG(sii->osh, &pciregs->clkrun);
+ W_REG(sii->osh, &pciregs->clkrun,
+ (w | PCI_CLKRUN_DSBL));
+ w = R_REG(sii->osh, &pciregs->clkrun);
+ }
+
+ /* switch back to previous core */
+ si_setcoreidx(sih, idx);
+ }
+}
+
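+/* mask&set the pcie clkreq setting; returns 0 on non-PCIE devices */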
+uint8 si_pcieclkreq(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!(PCIE(sii)))
+ return 0;
+ return pcie_clkreq(sii->pch, mask, val);
+}
+
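+/* mask&set the pcie link control register; returns 0 on non-PCIE devices */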
+uint32 si_pcielcreg(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return 0;
+
+ return pcie_lcreg(sii->pch, mask, val);
+}
+
+/* indirect way to read pcie config regs */
+uint si_pcie_readreg(void *sih, uint addrtype, uint offset)
+{
+ return pcie_readreg(((si_info_t *) sih)->osh,
+ (sbpcieregs_t *) PCIEREGS(((si_info_t *) sih)),
+ addrtype, offset);
+}
+
+/*
+ * Fixup SROMless PCI device's configuration.
+ * The current core may be changed upon return.
+ */
+int si_pci_fixcfg(si_t * sih)
+{
+ uint origidx, pciidx;
+ sbpciregs_t *pciregs = NULL;
+ sbpcieregs_t *pcieregs = NULL;
+ void *regs = NULL;
+ uint16 val16, *reg16 = NULL;
+
+ si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(BUSTYPE(sii->pub.bustype) == PCI_BUS);
+
+ /* Fixup PI in SROM shadow area to enable the correct PCI core access */
+ /* save the current index */
+ origidx = si_coreidx(&sii->pub);
+
+ /* check 'pi' is correct and fix it if not */
+ if (sii->pub.buscoretype == PCIE_CORE_ID) {
+ pcieregs =
+ (sbpcieregs_t *) si_setcore(&sii->pub, PCIE_CORE_ID, 0);
+ regs = pcieregs;
+ ASSERT(pcieregs != NULL);
+ reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
+ } else if (sii->pub.buscoretype == PCI_CORE_ID) {
+ pciregs = (sbpciregs_t *) si_setcore(&sii->pub, PCI_CORE_ID, 0);
+ regs = pciregs;
+ ASSERT(pciregs != NULL);
+ reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
+ }
+ pciidx = si_coreidx(&sii->pub);
+ val16 = R_REG(sii->osh, reg16);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (uint16) pciidx) {
+ val16 =
+ (uint16) (pciidx << SRSH_PI_SHIFT) | (val16 &
+ ~SRSH_PI_MASK);
+ W_REG(sii->osh, reg16, val16);
+ }
+
+ /* restore the original index */
+ si_setcoreidx(&sii->pub, origidx);
+
+ pcicore_hwup(sii->pch);
+ return 0;
+}
+
+/* change logical "focus" to the gpio core for optimized access */
+void *si_gpiosetcore(si_t * sih)
+{
+ return (si_setcoreidx(sih, SI_CC_IDX));
+}
+
+/* mask&set gpiocontrol bits */
+uint32 si_gpiocontrol(si_t * sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiocontrol);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output enable bits */
+uint32 si_gpioouten(si_t * sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioouten);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio output bits */
+uint32 si_gpioout(si_t * sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ regoff = 0;
+
+ /* gpios could be shared on router platforms
+ * ignore reservation if it's high priority (e.g., test apps)
+ */
+ if ((priority != GPIO_HI_PRIORITY) &&
+ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* reserve one gpio */
+uint32 si_gpioreserve(si_t * sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIOs
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask)
+ && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* release one gpio */
+/*
+ * releasing the gpio doesn't change its current value; the last written
+ * value persists until someone overwrites it
+ */
+
+uint32 si_gpiorelease(si_t * sih, uint32 gpio_bitmask, uint8 priority)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+	/* only cores on SI_BUS share GPIOs and only application users need to
+	 * reserve/release GPIOs
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask)
+ && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32 si_gpioin(si_t * sih)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32 si_gpiointpolarity(si_t * sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32 si_gpiointmask(si_t * sih, uint32 mask, uint32 val, uint8 priority)
+{
+ si_info_t *sii;
+ uint regoff;
+
+ sii = SI_INFO(sih);
+ regoff = 0;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an LED */
+uint32 si_gpioled(si_t * sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ /* gpio led powersave reg */
+ return (si_corereg
+ (sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask,
+ val));
+}
+
+/* mask&set gpio timer val */
+uint32 si_gpiotimerval(si_t * sih, uint32 mask, uint32 gpiotimerval)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask,
+ gpiotimerval));
+}
+
+uint32 si_gpiopull(si_t * sih, bool updown, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 20)
+ return 0xffffffff;
+
+ offs =
+ (updown ? OFFSETOF(chipcregs_t, gpiopulldown) :
+ OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32 si_gpioevent(si_t * sih, uint regtype, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
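+/* Register a handler for GPIO events; returns a handle for unregistering */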
+void *BCMATTACHFN(si_gpio_handler_register) (si_t * sih, uint32 event,
+ bool level, gpio_handler_t cb,
+ void *arg) {
+ si_info_t *sii;
+ gpioh_item_t *gi;
+
+ ASSERT(event);
+ ASSERT(cb != NULL);
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return NULL;
+
+ if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
+ return NULL;
+
+ bzero(gi, sizeof(gpioh_item_t));
+ gi->event = event;
+ gi->handler = cb;
+ gi->arg = arg;
+ gi->level = level;
+
+ gi->next = sii->gpioh_head;
+ sii->gpioh_head = gi;
+
+ return (void *)(gi);
+}
+
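+/* Remove a handler registered with si_gpio_handler_register */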
+void BCMATTACHFN(si_gpio_handler_unregister) (si_t * sih, void *gpioh) {
+ si_info_t *sii;
+ gpioh_item_t *p, *n;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return;
+
+ ASSERT(sii->gpioh_head != NULL);
+ if ((void *)sii->gpioh_head == gpioh) {
+ sii->gpioh_head = sii->gpioh_head->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ } else {
+ p = sii->gpioh_head;
+ n = p->next;
+ while (n) {
+ if ((void *)n == gpioh) {
+ p->next = n->next;
+ MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
+ return;
+ }
+ p = n;
+ n = n->next;
+ }
+ }
+
+ ASSERT(0); /* Not found in list */
+}
+
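+/* Call GPIO handlers whose event mask matches the current level/edge status */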
+void si_gpio_handler_process(si_t * sih)
+{
+ si_info_t *sii;
+ gpioh_item_t *h;
+ uint32 status;
+ uint32 level = si_gpioin(sih);
+ uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
+
+ sii = SI_INFO(sih);
+ for (h = sii->gpioh_head; h != NULL; h = h->next) {
+ if (h->handler) {
+ status = (h->level ? level : edge);
+
+ if (status & h->event)
+ h->handler(status, h->arg);
+ }
+ }
+
+ si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
+}
+
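+/* Enable or disable the GPIO interrupt in the chipcommon intmask */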
+uint32 si_gpio_int_enable(si_t * sih, bool enable)
+{
+ si_info_t *sii;
+ uint offs;
+
+ sii = SI_INFO(sih);
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg
+ (sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+/* Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t * sii, sbsocramregs_t * regs, uint8 index,
+ uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = index | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize =
+ SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
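+/* Get or set the enable/protect state of the SOCRAM devram banks */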
+void si_socdevram(si_t * sih, bool set, uint8 * enable, uint8 * protect)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ if (!set)
+ *enable = *protect = 0;
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >>
+ SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx =
+ i | (SOCRAM_MEMTYPE_DEVRAM <<
+ SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (set) {
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+ if (*enable) {
+ bankinfo |=
+ (1 <<
+ SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+ if (*protect)
+ bankinfo |=
+ (1 <<
+ SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+ }
+ W_REG(sii->osh, &regs->bankinfo, bankinfo);
+ } else if (i == 0) {
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+ *enable = 1;
+ if (bankinfo &
+ SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+ *protect = 1;
+ }
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+ done:
+ INTR_RESTORE(sii, intr_val);
+}
+
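+/* Return TRUE if the chip has any SOCRAM device RAM */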
+bool si_socdevram_pkg(si_t * sih)
+{
+ if (si_socdevram_size(sih) > 0)
+ return TRUE;
+ else
+ return FALSE;
+}
+
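+/* Return the total size of the SOCRAM device RAM banks */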
+uint32 si_socdevram_size(si_t * sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >>
+ SOCRAM_DEVRAMBANK_SHIFT));
+ for (i = 0; i < nb; i++)
+ memsize +=
+ socram_banksize(sii, regs, i,
+ SOCRAM_MEMTYPE_DEVRAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+ done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/* Return the RAM size of the SOCRAM core */
+uint32 si_socram_size(si_t * sih)
+{
+ si_info_t *sii;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ sii = SI_INFO(sih);
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb--;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize +=
+ socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+ done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
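+/* Enable/disable the external PA controls for the 4331 via chipcontrol */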
+void si_chipcontrl_epa4331(si_t * sih, bool on)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0);
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+ /* Ext PA Controls for 4331 12x9 Package */
+ W_REG(sii->osh, &cc->chipcontrol, val |
+ (CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5));
+ } else {
+ /* Ext PA Controls for 4331 12x12 Package */
+ W_REG(sii->osh, &cc->chipcontrol,
+ val | (CCTRL4331_EXTPA_EN));
+ }
+ } else {
+ val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* Enable BT-COEX & Ex-PA for 4313 */
+void si_epa_4313war(si_t * sih)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *) si_setcore(sih, CC_CORE_ID, 0);
+
+ /* EPA Fix */
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
+
+/* check if the device is removed */
+bool si_deviceremoved(si_t * sih)
+{
+ uint32 w;
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ switch (BUSTYPE(sih->bustype)) {
+ case PCI_BUS:
+ ASSERT(sii->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ if ((w & 0xFFFF) != VENDOR_BROADCOM)
+ return TRUE;
+ break;
+ }
+ return FALSE;
+}
+
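+/* Return TRUE if an SPROM is present/selected on this chip */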
+bool si_is_sprom_available(si_t * sih)
+{
+ if (sih->ccrev >= 31) {
+ si_info_t *sii;
+ uint origidx;
+ chipcregs_t *cc;
+ uint32 sromctrl;
+
+ if ((sih->cccaps & CC_CAP_SROM) == 0)
+ return FALSE;
+
+ sii = SI_INFO(sih);
+ origidx = sii->curidx;
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ sromctrl = R_REG(sii->osh, &cc->sromcontrol);
+ si_setcoreidx(sih, origidx);
+ return (sromctrl & SRC_PRESENT);
+ }
+
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ return (sih->chipst & CST4329_SPROM_SEL) != 0;
+ case BCM4319_CHIP_ID:
+ return (sih->chipst & CST4319_SPROM_SEL) != 0;
+ case BCM4336_CHIP_ID:
+ return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+ case BCM4330_CHIP_ID:
+ return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ case BCM4331_CHIP_ID:
+ return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+ default:
+ return TRUE;
+ }
+}
+
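+/* Return TRUE if OTP is powered down or not present on this chip */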
+bool si_is_otp_disabled(si_t * sih)
+{
+ switch (CHIPID(sih->chip)) {
+ case BCM4329_CHIP_ID:
+ return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
+ CST4329_OTP_PWRDN;
+ case BCM4319_CHIP_ID:
+ return (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) ==
+ CST4319_OTP_PWRDN;
+ case BCM4336_CHIP_ID:
+ return ((sih->chipst & CST4336_OTP_PRESENT) == 0);
+ case BCM4330_CHIP_ID:
+ return ((sih->chipst & CST4330_OTP_PRESENT) == 0);
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ /* These chips always have their OTP on */
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM43421_CHIP_ID:
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:
+ case BCM4331_CHIP_ID:
+ default:
+ return FALSE;
+ }
+}
+
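+/* Return TRUE if OTP is currently powered (via the PMU when present) */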
+bool si_is_otp_powered(si_t * sih)
+{
+ if (PMUCTL_ENAB(sih))
+ return si_pmu_is_otp_powered(sih, si_osh(sih));
+ return TRUE;
+}
+
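+/* Power the OTP on or off through the PMU */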
+void si_otp_power(si_t * sih, bool on)
+{
+ if (PMUCTL_ENAB(sih))
+ si_pmu_otp_power(sih, si_osh(sih), on);
+ OSL_DELAY(1000);
+}
+
+bool
+#if defined(BCMDBG)
+si_is_sprom_enabled(si_t * sih)
+#else
+BCMATTACHFN(si_is_sprom_enabled) (si_t * sih)
+#endif
+{
+
+ return TRUE;
+}
+
+void
+#if defined(BCMDBG)
+si_sprom_enable(si_t * sih, bool enable)
+#else
+BCMATTACHFN(si_sprom_enable) (si_t * sih, bool enable)
+#endif
+{
+ if (PMUCTL_ENAB(sih))
+ si_pmu_sprom_enable(sih, si_osh(sih), enable);
+}
+
+/* Return BCME_NOTFOUND if the card doesn't have CIS format nvram */
+int si_cis_source(si_t * sih)
+{
+ /* Many chips have the same mapping of their chipstatus field */
+ static const uint cis_sel[] =
+ { CIS_DEFAULT, CIS_SROM, CIS_OTP, CIS_SROM };
+ static const uint cis_43236_sel[] =
+ { CIS_DEFAULT, CIS_SROM, CIS_OTP, CIS_OTP };
+
+ /* PCI chips use SROM format instead of CIS */
+ if (BUSTYPE(sih->bustype) == PCI_BUS)
+ return BCME_NOTFOUND;
+
+ switch (CHIPID(sih->chip)) {
+ case BCM43235_CHIP_ID:
+ case BCM43236_CHIP_ID:
+ case BCM43238_CHIP_ID:{
+			uint8 strap =
+			    (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
+			    CST4322_SPROM_OTP_SEL_SHIFT;
+			return (strap >= sizeof(cis_sel)) ?
+			    CIS_DEFAULT : cis_43236_sel[strap];
+		}
+
+	case BCM4329_CHIP_ID:
+		return ((sih->chipst & CST4329_SPROM_OTP_SEL_MASK) >=
+			sizeof(cis_sel)) ? CIS_DEFAULT :
+		    cis_sel[sih->chipst & CST4329_SPROM_OTP_SEL_MASK];
+	case BCM4319_CHIP_ID:{
+			uint cis_sel4319 =
+			    (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) >>
+			    CST4319_SPROM_OTP_SEL_SHIFT;
+			return (cis_sel4319 >= sizeof(cis_sel)) ?
+			    CIS_DEFAULT : cis_sel[cis_sel4319];
+ }
+ case BCM4336_CHIP_ID:{
+ if (sih->chipst & CST4336_SPROM_PRESENT)
+ return CIS_SROM;
+ if (sih->chipst & CST4336_OTP_PRESENT)
+ return CIS_OTP;
+ return CIS_DEFAULT;
+ }
+ case BCM4330_CHIP_ID:{
+ if (sih->chipst & CST4330_SPROM_PRESENT)
+ return CIS_SROM;
+ if (sih->chipst & CST4330_OTP_PRESENT)
+ return CIS_OTP;
+ return CIS_DEFAULT;
+ }
+ default:
+ return CIS_DEFAULT;
+ }
+}
diff --git a/drivers/staging/brcm80211/util/siutils_priv.h b/drivers/staging/brcm80211/util/siutils_priv.h
new file mode 100644
index 000000000000..b0c780375c90
--- /dev/null
+++ b/drivers/staging/brcm80211/util/siutils_priv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _siutils_priv_h_
+#define _siutils_priv_h_
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t * sih, void *regs, uint devid);
+extern uint sb_coreid(si_t * sih);
+extern uint sb_corerev(si_t * sih);
+extern uint sb_corereg(si_t * sih, uint coreidx, uint regoff, uint mask,
+		       uint val);
+extern bool sb_iscoreup(si_t * sih);
+extern void *sb_setcoreidx(si_t * sih, uint coreidx);
+extern uint32 sb_base(uint32 admatch);
+extern void sb_core_reset(si_t * sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t * sih, uint32 bits);
+extern bool sb_taclear(si_t * sih, bool details);
+#endif /* _siutils_priv_h_ */