author		2016-11-16 06:06:04 +0000
committer	2016-11-16 06:06:04 +0000
commit		dc039b831364682ad8fda4985a99a3756b4446eb (patch)
tree		2ded43b553e496de860b94a95a2844a992c05f80 /sys
parent		allocate dmamaps for every descriptor in the tx ring. (diff)
rework re_start and re_txeof to be more like other drivers.
the important change is to no longer account for packets and
descriptors separately; instead we just deal with available descriptors
on the ring.
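
a minimal standalone sketch of that accounting (illustrative names,
not the driver's structures):

	#include <assert.h>

	/*
	 * free descriptors on a ring of cnt slots: everything from the
	 * producer index up to (but not including) the consumer index
	 * is available. illustrative helper, not code from this commit.
	 */
	static unsigned int
	ring_space(unsigned int cons, unsigned int prod, unsigned int cnt)
	{
		unsigned int free = cons;

		if (free <= prod)
			free += cnt;	/* consumer is behind the wrap */

		return (free - prod);
	}

	int
	main(void)
	{
		assert(ring_space(10, 250, 256) == 16);	/* nearly full */
		assert(ring_space(0, 0, 256) == 256);	/* empty ring */
		return (0);
	}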
this allows us to guarantee that oactive will only be set after
re_start has queued at least one packet, which in turn guarantees
that the chip will interrupt in the future and run re_txeof to clear
oactive.
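
the ordering that provides the guarantee: check for room first,
dequeue second, so stopping never strands a dequeued packet and only
happens while the ring still holds posted work. a simplified runnable
sketch of that loop shape (stand-in names and a worst-case segment
count assumed; the real code is in the diff below):

	#include <stdio.h>

	/*
	 * userland simulation of the reworked start loop. names and
	 * numbers are illustrative; only the control flow mirrors the
	 * change: room is checked before dequeueing, so oactive can
	 * only be set once at least one packet is already on the ring.
	 */
	#define RING_SLOTS	16
	#define MAX_SEGS	4	/* assumed worst case per packet */

	int
	main(void)
	{
		unsigned int free = RING_SLOTS;
		int oactive = 0, pending = 10, posted = 0;

		for (;;) {
			if (free < MAX_SEGS + 1) {	/* + pad desc */
				oactive = 1;	/* ring holds packets */
				break;
			}
			if (pending == 0)
				break;
			pending--;

			free -= MAX_SEGS;	/* pretend worst-case encap */
			posted++;
		}

		/* oactive implies posted > 0, so an interrupt follows */
		printf("posted %d oactive %d free %u\n", posted, oactive, free);
		return (0);
	}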
without this change it was possible for a call to re_start to almost
fill the descriptor ring, followed by a second call that could fail
to post a packet because it had run out of descriptors and would
then set oactive. if re_txeof ran concurrently, it could complete
all the previously posted packets without noticing that oactive had
been set and therefore needed to be cleared. in that situation
oactive would be set, but nothing would be set up to clear it
in the immediate future.
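
that interleaving, replayed as a minimal standalone sketch (all names
are made up; the two calls stand in for the concurrent re_txeof and
the second re_start):

	#include <stdio.h>

	/* sequential replay of the racy interleaving described above */
	int oactive, ring_packets;

	static void
	old_re_start_second_call(void)
	{
		/* encap fails: the ring is already out of descriptors */
		oactive = 1;	/* oactive is only set afterwards */
	}

	static void
	old_re_txeof(void)
	{
		ring_packets = 0;	/* completes everything posted */
		if (oactive)		/* not set yet: restart skipped */
			printf("restart queue\n");
	}

	int
	main(void)
	{
		ring_packets = 10;	/* first re_start filled the ring */
		old_re_txeof();		/* runs before oactive is set */
		old_re_start_second_call();
		printf("oactive %d, packets on ring %d -> no interrupt coming\n",
		    oactive, ring_packets);
		return (0);
	}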
this fixes a lockup reported by haesbaert.
Diffstat (limited to 'sys')
-rw-r--r--	sys/dev/ic/re.c | 203
1 file changed, 89 insertions, 114 deletions
diff --git a/sys/dev/ic/re.c b/sys/dev/ic/re.c
index ee9d02657b9..f7f6fa24306 100644
--- a/sys/dev/ic/re.c
+++ b/sys/dev/ic/re.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: re.c,v 1.197 2016/11/16 02:50:17 dlg Exp $	*/
+/*	$OpenBSD: re.c,v 1.198 2016/11/16 06:06:04 dlg Exp $	*/
 /*	$FreeBSD: if_re.c,v 1.31 2004/09/04 07:54:05 ru Exp $	*/
 /*
  * Copyright (c) 1997, 1998-2003
@@ -152,7 +152,7 @@ int redebug = 0;
 static inline void	re_set_bufaddr(struct rl_desc *, bus_addr_t);
 
-int	re_encap(struct rl_softc *, struct mbuf *, struct rl_txq *, int *);
+int	re_encap(struct rl_softc *, unsigned int, struct mbuf *);
 
 int	re_newbuf(struct rl_softc *);
 int	re_rx_list_init(struct rl_softc *);
@@ -1008,7 +1008,7 @@ re_attach(struct rl_softc *sc, const char *intrstr)
 	ifp->if_start = re_start;
 	ifp->if_watchdog = re_watchdog;
 	ifp->if_hardmtu = sc->rl_max_mtu;
-	IFQ_SET_MAXLEN(&ifp->if_snd, RL_TX_QLEN);
+	IFQ_SET_MAXLEN(&ifp->if_snd, sc->rl_ldata.rl_tx_desc_cnt);
 
 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
 	    IFCAP_CSUM_UDPv4;
@@ -1408,28 +1408,30 @@ re_rxeof(struct rl_softc *sc)
 int
 re_txeof(struct rl_softc *sc)
 {
-	struct ifnet	*ifp;
+	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
 	struct rl_txq	*txq;
 	uint32_t	txstat;
-	int		idx, descidx, tx_free, freed = 0;
+	unsigned int	prod, cons;
+	unsigned int	idx;
+	int		free = 0;
 
-	ifp = &sc->sc_arpcom.ac_if;
-
-	for (idx = sc->rl_ldata.rl_txq_considx;
-	    idx != sc->rl_ldata.rl_txq_prodidx; idx = RL_NEXT_TXQ(sc, idx)) {
-		txq = &sc->rl_ldata.rl_txq[idx];
+	prod = sc->rl_ldata.rl_txq_prodidx;
+	cons = sc->rl_ldata.rl_txq_considx;
 
-		descidx = txq->txq_descidx;
-		RL_TXDESCSYNC(sc, descidx,
-		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-		txstat =
-		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
-		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
-		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
-		if (txstat & RL_TDESC_CMD_OWN)
+	while (prod != cons) {
+		txq = &sc->rl_ldata.rl_txq[cons];
+
+		idx = txq->txq_descidx;
+		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD);
+		txstat = letoh32(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
+		RL_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
+		if (ISSET(txstat, RL_TDESC_CMD_OWN)) {
+			free = 2;
 			break;
+		}
 
-		freed += txq->txq_nsegs;
 		bus_dmamap_sync(sc->sc_dmat, txq->txq_dmamap,
 		    0, txq->txq_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
 		bus_dmamap_unload(sc->sc_dmat, txq->txq_dmamap);
@@ -1442,15 +1444,15 @@ re_txeof(struct rl_softc *sc)
 			ifp->if_oerrors++;
 		else
 			ifp->if_opackets++;
+
+		cons = RL_NEXT_TX_DESC(sc, idx);
+		free = 1;
 	}
 
-	if (freed == 0)
+	if (free == 0)
 		return (0);
 
-	tx_free = atomic_add_int_nv(&sc->rl_ldata.rl_tx_free, freed);
-	KASSERT(tx_free <= sc->rl_ldata.rl_tx_desc_cnt);
-
-	sc->rl_ldata.rl_txq_considx = idx;
+	sc->rl_ldata.rl_txq_considx = cons;
 
 	/*
 	 * Some chips will ignore a second TX request issued while an
@@ -1461,7 +1463,7 @@ re_txeof(struct rl_softc *sc)
 	 */
 	if (ifq_is_oactive(&ifp->if_snd))
 		ifq_restart(&ifp->if_snd);
-	else if (tx_free < sc->rl_ldata.rl_tx_desc_cnt)
+	else if (free == 2)
 		ifq_serialize(&ifp->if_snd, &sc->rl_start);
 	else
 		ifp->if_timer = 0;
@@ -1570,11 +1572,11 @@ re_intr(void *arg)
 }
 
 int
-re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
+re_encap(struct rl_softc *sc, unsigned int idx, struct mbuf *m)
 {
+	struct rl_txq	*txq;
 	bus_dmamap_t	map;
-	struct mbuf	*mp, mh;
-	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
+	int		error, seg, nsegs, curidx, lastidx, pad;
 	int		off;
 	struct ip	*ip;
 	struct rl_desc	*d;
@@ -1597,6 +1599,8 @@ re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
 	    m->m_pkthdr.len > RL_MTU &&
 	    (m->m_pkthdr.csum_flags &
 	    (M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT)) != 0) {
+		struct mbuf mh, *mp;
+
 		mp = m_getptr(m, ETHER_HDR_LEN, &off);
 		mh.m_flags = 0;
 		mh.m_data = mtod(mp, caddr_t) + off;
@@ -1631,6 +1635,7 @@ re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
 		}
 	}
 
+	txq = &sc->rl_ldata.rl_txq[idx];
 	map = txq->txq_dmamap;
 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
@@ -1647,9 +1652,12 @@ re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
 		/* FALLTHROUGH */
 	default:
-		return (ENOMEM);
+		return (0);
 	}
 
+	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+
 	nsegs = map->dm_nsegs;
 	pad = 0;
@@ -1671,18 +1679,6 @@ re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
 		nsegs++;
 	}
 
-	if (*used + nsegs + 1 >= sc->rl_ldata.rl_tx_free) {
-		error = ENOBUFS;
-		goto fail_unload;
-	}
-
-	/*
-	 * Make sure that the caches are synchronized before we
-	 * ask the chip to start DMA for the packet data.
-	 */
-	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
-	    BUS_DMASYNC_PREWRITE);
-
 	/*
 	 * Set up hardware VLAN tagging. Note: vlan tag info must
 	 * appear in all descriptors of a multi-descriptor
@@ -1705,84 +1701,66 @@ re_encap(struct rl_softc *sc, struct mbuf *m, struct rl_txq *txq, int *used)
 	 * set this descriptor later when it start transmission or
 	 * reception.)
 	 */
-	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
-	lastidx = -1;
-	for (seg = 0; seg < map->dm_nsegs;
-	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
+	curidx = idx;
+	cmdstat = RL_TDESC_CMD_SOF;
+
+	for (seg = 0; seg < map->dm_nsegs; seg++) {
 		d = &sc->rl_ldata.rl_tx_list[curidx];
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-		cmdstat = letoh32(d->rl_cmdstat);
-		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
-		if (cmdstat & RL_TDESC_STAT_OWN) {
-			printf("%s: tried to map busy TX descriptor\n",
-			    sc->sc_dev.dv_xname);
-			for (; seg > 0; seg --) {
-				uidx = (curidx + sc->rl_ldata.rl_tx_desc_cnt -
-				    seg) % sc->rl_ldata.rl_tx_desc_cnt;
-				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
-				RL_TXDESCSYNC(sc, uidx,
-				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-			}
-			error = EBUSY;
-			goto fail_unload;
-		}
+
+		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
 
 		d->rl_vlanctl = htole32(vlanctl);
 		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
-		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
-		if (seg == 0)
-			cmdstat |= RL_TDESC_CMD_SOF;
-		else
-			cmdstat |= RL_TDESC_CMD_OWN;
+		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;
+
 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
 			cmdstat |= RL_TDESC_CMD_EOR;
-		if (seg == nsegs - 1) {
-			cmdstat |= RL_TDESC_CMD_EOF;
-			lastidx = curidx;
-		}
+
 		d->rl_cmdstat = htole32(cmdstat);
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
+
+		lastidx = curidx;
+		cmdstat = RL_TDESC_CMD_OWN;
+		curidx = RL_NEXT_TX_DESC(sc, curidx);
 	}
+
 	if (pad) {
 		d = &sc->rl_ldata.rl_tx_list[curidx];
+
+		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
+
 		d->rl_vlanctl = htole32(vlanctl);
 		re_set_bufaddr(d, RL_TXPADDADDR(sc));
 		cmdstat = csum_flags | RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
 		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
+
 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
 			cmdstat |= RL_TDESC_CMD_EOR;
+
 		d->rl_cmdstat = htole32(cmdstat);
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
+
 		lastidx = curidx;
-		curidx = RL_NEXT_TX_DESC(sc, curidx);
 	}
-	KASSERT(lastidx != -1);
+
+	/* d is already pointing at the last descriptor */
+	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
 
 	/* Transfer ownership of packet to the chip. */
+	d = &sc->rl_ldata.rl_tx_list[idx];
 
-	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
-	    htole32(RL_TDESC_CMD_OWN);
-	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_POSTWRITE);
+	d->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN);
+	RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREWRITE);
 
 	/* update info of TX queue and descriptors */
 	txq->txq_mbuf = m;
 	txq->txq_descidx = lastidx;
-	txq->txq_nsegs = nsegs;
-
-	sc->rl_ldata.rl_tx_nextfree = curidx;
-
-	*used += nsegs;
-	return (0);
-
-fail_unload:
-	bus_dmamap_unload(sc->sc_dmat, map);
-
-	return (error);
+	return (nsegs);
 }
 
 void
@@ -1802,40 +1780,34 @@ re_start(struct ifnet *ifp)
 {
 	struct rl_softc	*sc = ifp->if_softc;
 	struct mbuf	*m;
-	int		idx, used = 0, txq_free, error;
+	unsigned int	idx;
+	unsigned int	free, used;
+	int		post = 0;
 
 	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
 		IFQ_PURGE(&ifp->if_snd);
 		return;
 	}
 
-	txq_free = sc->rl_ldata.rl_txq_considx;
+	free = sc->rl_ldata.rl_txq_considx;
 	idx = sc->rl_ldata.rl_txq_prodidx;
-	if (txq_free <= idx)
-		txq_free += RL_TX_QLEN;
-	txq_free -= idx;
+	if (free <= idx)
+		free += sc->rl_ldata.rl_tx_desc_cnt;
+	free -= idx;
 
 	for (;;) {
-		if (txq_free <= 1) {
+		if (sc->rl_ldata.rl_tx_ndescs >= free + 2) {
			ifq_set_oactive(&ifp->if_snd);
 			break;
 		}
 
-		m = ifq_deq_begin(&ifp->if_snd);
+		m = ifq_dequeue(&ifp->if_snd);
 		if (m == NULL)
 			break;
 
-		error = re_encap(sc, m, &sc->rl_ldata.rl_txq[idx], &used);
-		if (error == 0)
-			ifq_deq_commit(&ifp->if_snd, m);
-		else if (error == ENOBUFS) {
-			ifq_deq_rollback(&ifp->if_snd, m);
-			ifq_set_oactive(&ifp->if_snd);
-			break;
-		} else {
-			ifq_deq_commit(&ifp->if_snd, m);
+		used = re_encap(sc, idx, m);
+		if (used == 0) {
 			m_freem(m);
-			ifp->if_oerrors++;
 			continue;
 		}
@@ -1843,19 +1815,22 @@ re_start(struct ifnet *ifp)
 #if NBPFILTER > 0
 		if (ifp->if_bpf)
 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
-		idx = RL_NEXT_TXQ(sc, idx);
-		txq_free--;
+
+		KASSERT(used <= free);
+		free -= used;
+
+		idx += used;
+		if (idx >= sc->rl_ldata.rl_tx_desc_cnt)
+			idx -= sc->rl_ldata.rl_tx_desc_cnt;
+
+		post = 1;
 	}
 
-	if (used == 0)
+	if (post == 0)
 		return;
-
-	ifp->if_timer = 5;
-	atomic_sub_int(&sc->rl_ldata.rl_tx_free, used);
-	KASSERT(sc->rl_ldata.rl_tx_free >= 0);
+	ifp->if_timer = 5;
 
 	sc->rl_ldata.rl_txq_prodidx = idx;
-
 	ifq_serialize(&ifp->if_snd, &sc->rl_start);
 }