Assorted fixes for mge(4).

- Use proper map for the busdma sync on the mge descriptor (a sketch of the
  tag/map pairing follows the commit trailers below).
- Remove unnecessary busdma sync.
- Eliminate redundant locking in mge_reinit_rx() (just assert; a sketch of
  the pattern follows the diff).
- Kill unused variable.

Submitted by:	Grzegorz Bernacki
Obtained from:	Semihalf
MFC after:	1 week
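
The first two items come down to busdma's pairing rule: a map may only be
synced against the tag it was created from, and a sync is only needed around
an actual DMA hand-off.  Below is a minimal sketch of the corrected
TX-descriptor sync; the desc_dmap/buffer_dmap field names are taken from the
diff, while the reduced struct, the helper name mge_sync_tx_desc, and the
include list are illustrative assumptions only, not the driver's own code.

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* Reduced view of the descriptor wrapper; only the two maps matter here. */
struct mge_desc_wrapper_sketch {
	bus_dmamap_t	desc_dmap;	/* maps the DMA descriptor itself */
	bus_dmamap_t	buffer_dmap;	/* maps the mbuf data buffer */
};

/*
 * Hypothetical helper: sync the descriptor before handing it to the
 * controller.  desc_dtag created desc_dmap, so that is the map to pass;
 * syncing buffer_dmap against desc_dtag (the old code's "mapp") pairs
 * a map with a foreign tag.
 */
static void
mge_sync_tx_desc(bus_dma_tag_t desc_dtag, struct mge_desc_wrapper_sketch *dw)
{
	bus_dmamap_sync(desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}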
Author:	Rafal Jaworowski
Date:	2010-02-17 17:03:04 +00:00
Commit:	d6bdd318a3
Parent:	f2ee2e68d2
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=204009

@@ -457,10 +457,7 @@ mge_allocate_dma(struct mge_softc *sc)
 {
 	int error;
 	struct mge_desc_wrapper *dw;
-	int num, i;
-
-	num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM;
+	int i;
 
 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
 	error = bus_dma_tag_create(NULL,	/* parent */
@@ -543,7 +540,7 @@ mge_reinit_rx(struct mge_softc *sc)
 	struct mge_desc_wrapper *dw;
 	int i;
 
-	MGE_RECEIVE_LOCK(sc);
+	MGE_RECEIVE_LOCK_ASSERT(sc);
 
 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
@@ -564,8 +561,6 @@ mge_reinit_rx(struct mge_softc *sc)
 	/* Enable RX queue */
 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
-
-	MGE_RECEIVE_UNLOCK(sc);
 }
 
 #ifdef DEVICE_POLLING
@@ -1375,9 +1370,6 @@ mge_encap(struct mge_softc *sc, struct mbuf *m0)
 	dw = &sc->mge_tx_desc[desc_no];
 	mapp = dw->buffer_dmap;
 
-	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
-	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
 	/* Create mapping in DMA memory */
 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
 	    BUS_DMA_NOWAIT);
@@ -1401,7 +1393,7 @@ mge_encap(struct mge_softc *sc, struct mbuf *m0)
 		mge_offload_setup_descriptor(sc, dw);
 	}
 
-	bus_dmamap_sync(sc->mge_desc_dtag, mapp,
+	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 	sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
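
For the mge_reinit_rx() hunks above: the function is reached with the receive
lock already held, so taking (and later dropping) the lock inside it was
redundant; asserting ownership documents and enforces the precondition
instead.  A minimal sketch of that pattern follows, assuming the driver macro
wraps a plain mtx(9) lock; the reduced softc and the field name receive_lock
are illustrative, and the real definitions live in the driver headers.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Reduced softc: only the receive mutex is relevant here. */
struct mge_softc_sketch {
	struct mtx	receive_lock;	/* field name assumed for illustration */
};

/* Assumed definition of the driver macro: assert ownership, do not lock. */
#define	MGE_RECEIVE_LOCK_ASSERT(sc)	\
	mtx_assert(&(sc)->receive_lock, MA_OWNED)

static void
mge_reinit_rx_sketch(struct mge_softc_sketch *sc)
{
	MGE_RECEIVE_LOCK_ASSERT(sc);	/* caller must already hold the RX lock */
	/* ... free and re-create RX descriptors, re-enable the RX queue ... */
}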