- switch adapter and port locks over to using sx so that resources
  can be allocated atomically
- add debug macros for printing lock initialization / teardown
- add buffers to port_info and adapter to allow each lock to have a
  unique name
- destroy mutexes initialized by cxgb_offload_init
- remove recursive calls to ADAPTER_LOCK
- move callout_drain calls so that they don't occur with the lock held
- ensure that only as many qsets as are needed are initialized and
  destroyed

MFC after: 3 days
Sponsored by: Chelsio Inc.
This commit is contained in:
Kip Macy 2007-06-17 04:33:38 +00:00
parent 60a35d3afd
commit bb38cd2fbc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170869
4 changed files with 205 additions and 85 deletions

View File

@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
@ -65,10 +66,43 @@ __FBSDID("$FreeBSD$");
#include <dev/cxgb/ulp/toecore/toedev.h>
#endif
#define USE_SX
struct adapter;
struct sge_qset;
extern int cxgb_debug;
/*
 * Lock initialization/teardown wrappers.  With DEBUG_LOCKING defined,
 * each wrapper logs the lock's name and the file/line of the caller
 * before delegating to the underlying mtx(9)/sx(9) primitive; without
 * it, the wrappers alias the primitives directly and add no overhead.
 */
#ifdef DEBUG_LOCKING
/* Trace mutex creation, then initialize it. */
#define MTX_INIT(lock, lockname, class, flags) \
do { \
printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
mtx_init((lock), lockname, class, flags); \
} while (0)
/* Trace mutex destruction; the name is read back from the lock itself. */
#define MTX_DESTROY(lock) \
do { \
printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
mtx_destroy((lock)); \
} while (0)
/* Trace sx lock creation, then initialize it. */
#define SX_INIT(lock, lockname) \
do { \
printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
sx_init((lock), lockname); \
} while (0)
/* Trace sx lock destruction; the name is read back from the lock itself. */
#define SX_DESTROY(lock) \
do { \
printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
sx_destroy((lock)); \
} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
struct port_info {
struct adapter *adapter;
struct ifnet *ifp;
@ -77,9 +111,12 @@ struct port_info {
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct ifmedia media;
struct ifmedia media;
#ifdef USE_SX
struct sx lock;
#else
struct mtx lock;
#endif
int port;
uint8_t hw_addr[ETHER_ADDR_LEN];
uint8_t nqsets;
@ -88,6 +125,11 @@ struct port_info {
struct task start_task;
struct task timer_reclaim_task;
struct cdev *port_cdev;
#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
char lockbuf[PORT_NAME_LEN];
char taskqbuf[TASKQ_NAME_LEN];
};
enum { /* adapter flags */
@ -104,6 +146,8 @@ enum { /* adapter flags */
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE 1024
/*
* Types of Tx queues in each queue set. Order here matters, do not change.
* XXX TOE is not implemented yet, so the extra queues are just placeholders.
@ -165,6 +209,9 @@ struct sge_rspq {
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
struct mbuf *m;
#define RSPQ_NAME_LEN 32
char lockbuf[RSPQ_NAME_LEN];
};
struct rx_desc;
@ -216,6 +263,8 @@ struct sge_txq {
bus_dma_tag_t entry_tag;
struct mbuf_head sendq;
struct mtx lock;
#define TXQ_NAME_LEN 32
char lockbuf[TXQ_NAME_LEN];
};
@ -260,7 +309,6 @@ struct adapter {
bus_space_tag_t bt;
bus_size_t mmio_len;
uint32_t link_width;
/* DMA resources */
bus_dma_tag_t parent_dmat;
@ -283,8 +331,8 @@ struct adapter {
/* Tasks */
struct task ext_intr_task;
struct task slow_intr_task;
struct task tick_task;
struct task process_responses_task;
struct task mr_refresh_task;
struct taskqueue *tq;
struct callout cxgb_tick_ch;
struct callout sge_timer_ch;
@ -310,9 +358,19 @@ struct adapter {
char fw_version[64];
uint32_t open_device_map;
uint32_t registered_device_map;
#ifdef USE_SX
struct sx lock;
#else
struct mtx lock;
#endif
driver_intr_t *cxgb_intr;
int msi_count;
#define ADAPTER_LOCK_NAME_LEN 32
char lockbuf[ADAPTER_LOCK_NAME_LEN];
char reglockbuf[ADAPTER_LOCK_NAME_LEN];
char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};
struct t3_rx_mode {
@ -327,12 +385,32 @@ struct t3_rx_mode {
#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
#define PORT_LOCK(port) mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock);
/*
 * Port and adapter lock operations.  With USE_SX the locks are sx(9)
 * locks (sleepable, so resources can be allocated with the lock held);
 * otherwise they are plain MTX_DEF mutexes.
 *
 * Note: no trailing semicolons in the macro bodies — the caller supplies
 * its own, and an embedded ';' would break use in if/else statements.
 */
#ifdef USE_SX
#define PORT_LOCK(port) sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port) sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port) SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)
#define ADAPTER_LOCK(adap) sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap) SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port) mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, NULL, MTX_DEF)
#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, NULL, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
/* Was MO_NOTOWNED, which is not an mtx_assert(9) flag; MA_NOTOWNED is. */
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
static __inline uint32_t

View File

@ -96,7 +96,8 @@ static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_ext_intr_handler(void *, int);
static void cxgb_down(struct adapter *sc);
static void cxgb_tick_handler(void *, int);
static void cxgb_down_locked(struct adapter *sc);
static void cxgb_tick(void *);
static void setup_rss(adapter_t *sc);
@ -318,7 +319,6 @@ upgrade_fw(adapter_t *sc)
device_printf(sc->dev, "Could not find firmware image %s\n", buf);
return (ENOENT);
}
status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
firmware_put(fw, FIRMWARE_UNLOAD);
@ -335,7 +335,7 @@ cxgb_controller_attach(device_t dev)
int i, reg, msi_needed, error = 0;
uint32_t vers;
int port_qsets = 1;
sc = device_get_softc(dev);
sc->dev = dev;
sc->msi_count = 0;
@ -368,10 +368,20 @@ cxgb_controller_attach(device_t dev)
return (ENXIO);
}
mtx_init(&sc->sge.reg_lock, "SGE reg lock", NULL, MTX_DEF);
mtx_init(&sc->lock, "cxgb controller lock", NULL, MTX_DEF);
mtx_init(&sc->mdio_lock, "cxgb mdio", NULL, MTX_DEF);
mtx_init(&sc->elmer_lock, "cxgb elmer", NULL, MTX_DEF);
snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
device_get_unit(dev));
ADAPTER_LOCK_INIT(sc, sc->lockbuf);
snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
device_get_unit(dev));
snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
device_get_unit(dev));
snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
device_get_unit(dev));
MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF);
MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
sc->bt = rman_get_bustag(sc->regs_res);
sc->bh = rman_get_bushandle(sc->regs_res);
@ -449,10 +459,11 @@ cxgb_controller_attach(device_t dev)
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(dev));
TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
/* Create a periodic callout for checking adapter status */
callout_init_mtx(&sc->cxgb_tick_ch, &sc->lock, CALLOUT_RETURNUNLOCKED);
callout_init(&sc->cxgb_tick_ch, TRUE);
if (t3_check_fw_version(sc) != 0) {
/*
@ -536,7 +547,11 @@ cxgb_free(struct adapter *sc)
{
int i;
cxgb_down(sc);
ADAPTER_LOCK(sc);
/*
* drops the lock
*/
cxgb_down_locked(sc);
#ifdef MSI_SUPPORTED
if (sc->flags & (USING_MSI | USING_MSIX)) {
@ -551,18 +566,15 @@ cxgb_free(struct adapter *sc)
sc->msix_regs_res);
}
/*
* XXX need to drain the ifq by hand until
* it is taught about mbuf iovecs
*/
callout_drain(&sc->cxgb_tick_ch);
t3_sge_deinit_sw(sc);
if (sc->tq != NULL) {
taskqueue_drain(sc->tq, &sc->ext_intr_task);
taskqueue_drain(sc->tq, &sc->tick_task);
taskqueue_free(sc->tq);
}
tsleep(&sc, 0, "cxgb unload", hz);
for (i = 0; i < (sc)->params.nports; ++i) {
if (sc->portdev[i] != NULL)
@ -579,14 +591,17 @@ cxgb_free(struct adapter *sc)
#endif
t3_free_sge_resources(sc);
t3_sge_free(sc);
cxgb_offload_exit();
if (sc->regs_res != NULL)
bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
sc->regs_res);
mtx_destroy(&sc->mdio_lock);
mtx_destroy(&sc->sge.reg_lock);
mtx_destroy(&sc->lock);
MTX_DESTROY(&sc->mdio_lock);
MTX_DESTROY(&sc->sge.reg_lock);
MTX_DESTROY(&sc->elmer_lock);
ADAPTER_LOCK_DEINIT(sc);
return;
}
@ -758,12 +773,12 @@ cxgb_port_attach(device_t dev)
struct port_info *p;
struct ifnet *ifp;
int err, media_flags;
char buf[64];
p = device_get_softc(dev);
snprintf(buf, sizeof(buf), "cxgb port %d", p->port);
mtx_init(&p->lock, buf, 0, MTX_DEF);
snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
device_get_unit(device_get_parent(dev)), p->port);
PORT_LOCK_INIT(p, p->lockbuf);
/* Allocate an ifnet object and set it up */
ifp = p->ifp = if_alloc(IFT_ETHER);
@ -834,10 +849,10 @@ cxgb_port_attach(device_t dev)
}
snprintf(buf, sizeof(buf), "cxgb_port_taskq%d", p->port);
snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port);
#ifdef TASKQUEUE_CURRENT
/* Create a port for handling TX without starvation */
p->tq = taskqueue_create(buf, M_NOWAIT,
p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT,
taskqueue_thread_enqueue, &p->tq);
#else
/* Create a port for handling TX without starvation */
@ -870,13 +885,13 @@ cxgb_port_detach(device_t dev)
cxgb_stop_locked(p);
PORT_UNLOCK(p);
mtx_destroy(&p->lock);
if (p->tq != NULL) {
taskqueue_drain(p->tq, &p->start_task);
taskqueue_free(p->tq);
p->tq = NULL;
}
PORT_LOCK_DEINIT(p);
ether_ifdetach(p->ifp);
if_free(p->ifp);
@ -1268,15 +1283,12 @@ irq_err:
* Release resources when all the ports and offloading have been stopped.
*/
static void
cxgb_down(struct adapter *sc)
cxgb_down_locked(struct adapter *sc)
{
int i;
t3_sge_stop(sc);
ADAPTER_LOCK(sc);
t3_intr_disable(sc);
ADAPTER_UNLOCK(sc);
if (sc->intr_tag != NULL) {
bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
@ -1292,8 +1304,11 @@ cxgb_down(struct adapter *sc)
if (sc->flags & USING_MSIX)
cxgb_teardown_msix(sc);
ADAPTER_UNLOCK(sc);
callout_drain(&sc->cxgb_tick_ch);
callout_drain(&sc->sge_timer_ch);
if (sc->tq != NULL)
taskqueue_drain(sc->tq, &sc->slow_intr_task);
for (i = 0; i < sc->params.nports; i++)
@ -1363,10 +1378,8 @@ offload_close(struct toedev *tdev)
t3_tp_set_offload_mode(adapter, 0);
clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
ADAPTER_LOCK(adapter);
if (!adapter->open_device_map)
cxgb_down(adapter);
ADAPTER_UNLOCK(adapter);
cxgb_offload_deactivate(adapter);
return (0);
@ -1390,7 +1403,7 @@ cxgb_init_locked(struct port_info *p)
adapter_t *sc = p->adapter;
int err;
mtx_assert(&p->lock, MA_OWNED);
PORT_LOCK_ASSERT_OWNED(p);
ifp = p->ifp;
ADAPTER_LOCK(p->adapter);
@ -1399,9 +1412,10 @@ cxgb_init_locked(struct port_info *p)
cxgb_stop_locked(p);
return;
}
if (p->adapter->open_device_map == 0)
if (p->adapter->open_device_map == 0) {
t3_intr_clear(sc);
t3_sge_init_adapter(sc);
}
setbit(&p->adapter->open_device_map, p->port);
ADAPTER_UNLOCK(p->adapter);
@ -1419,7 +1433,7 @@ cxgb_init_locked(struct port_info *p)
callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
cxgb_tick, sc);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
@ -1430,7 +1444,7 @@ cxgb_set_rxmode(struct port_info *p)
struct t3_rx_mode rm;
struct cmac *mac = &p->mac;
mtx_assert(&p->lock, MA_OWNED);
PORT_LOCK_ASSERT_OWNED(p);
t3_init_rx_mode(&rm, p);
t3_mac_set_rx_mode(mac, &rm);
@ -1441,8 +1455,8 @@ cxgb_stop_locked(struct port_info *p)
{
struct ifnet *ifp;
mtx_assert(&p->lock, MA_OWNED);
mtx_assert(&p->adapter->lock, MA_NOTOWNED);
PORT_LOCK_ASSERT_OWNED(p);
ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);
ifp = p->ifp;
@ -1453,12 +1467,13 @@ cxgb_stop_locked(struct port_info *p)
ADAPTER_LOCK(p->adapter);
clrbit(&p->adapter->open_device_map, p->port);
/*
* XXX cancel check_task
*/
if (p->adapter->open_device_map == 0)
cxgb_down(p->adapter);
ADAPTER_UNLOCK(p->adapter);
if (p->adapter->open_device_map == 0) {
cxgb_down_locked(p->adapter);
} else
ADAPTER_UNLOCK(p->adapter);
}
static int
@ -1511,27 +1526,25 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
PORT_UNLOCK(p);
break;
case SIOCSIFFLAGS:
PORT_LOCK(p);
callout_drain(&p->adapter->cxgb_tick_ch);
PORT_LOCK(p);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
flags = p->if_flags;
if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
((ifp->if_flags ^ flags) & IFF_ALLMULTI))
cxgb_set_rxmode(p);
} else
cxgb_init_locked(p);
p->if_flags = ifp->if_flags;
} else {
callout_stop(&p->adapter->cxgb_tick_ch);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
cxgb_stop_locked(p);
} else {
adapter_t *sc = p->adapter;
callout_reset(&sc->cxgb_tick_ch,
sc->params.stats_update_period * hz,
cxgb_tick, sc);
}
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
cxgb_stop_locked(p);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
adapter_t *sc = p->adapter;
callout_reset(&sc->cxgb_tick_ch,
sc->params.stats_update_period * hz,
cxgb_tick, sc);
}
PORT_UNLOCK(p);
break;
@ -1746,9 +1759,10 @@ cxgb_async_intr(void *data)
if (cxgb_debug)
device_printf(sc->dev, "cxgb_async_intr\n");
t3_slow_intr_handler(sc);
/*
* May need to sleep - defer to taskqueue
*/
taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}
static void
@ -1822,14 +1836,25 @@ check_t3b2_mac(struct adapter *adapter)
/*
 * Periodic adapter timer, invoked from callout (softclock) context.
 * It must not sleep here, so the real work is deferred to the adapter
 * taskqueue via tick_task (handled by cxgb_tick_handler); the callout
 * re-arms itself only while at least one device is still open.
 */
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	taskqueue_enqueue(sc->tq, &sc->tick_task);

	/* Stop rescheduling once every port/offload device has closed. */
	if (sc->open_device_map != 0)
		callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
		    cxgb_tick, sc);
}
static void
cxgb_tick_handler(void *arg, int count)
{
adapter_t *sc = (adapter_t *)arg;
const struct adapter_params *p = &sc->params;
ADAPTER_LOCK(sc);
if (p->linkpoll_period)
check_link_status(sc);
callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
cxgb_tick, sc);
/*
* adapter lock can currently only be acquire after the

View File

@ -1512,7 +1512,7 @@ cxgb_offload_init(void)
return;
else
inited = 1;
mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
rw_init(&adapter_list_lock, "ofld adap list");
TAILQ_INIT(&client_list);
@ -1555,6 +1555,14 @@ cxgb_offload_init(void)
void
cxgb_offload_exit(void)
{
static int deinited = 0;
if (deinited)
return;
deinited = 1;
mtx_destroy(&cxgb_db_lock);
rw_destroy(&adapter_list_lock);
#if 0
offload_proc_cleanup();
#endif

View File

@ -686,7 +686,8 @@ sge_timer_cb(void *arg)
break;
}
}
callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
if (sc->open_device_map != 0)
callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
}
/*
@ -1467,7 +1468,7 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
}
}
for (i = 0; i < SGE_TXQ_PER_SET; ++i) {
for (i = 0; i < SGE_TXQ_PER_SET; i++) {
if (q->txq[i].desc) {
mtx_lock(&sc->sge.reg_lock);
t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
@ -1478,13 +1479,11 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
q->txq[i].desc_map);
bus_dma_tag_destroy(q->txq[i].desc_tag);
bus_dma_tag_destroy(q->txq[i].entry_tag);
MTX_DESTROY(&q->txq[i].lock);
}
if (q->txq[i].sdesc) {
free(q->txq[i].sdesc, M_DEVBUF);
}
if (mtx_initialized(&q->txq[i].lock)) {
mtx_destroy(&q->txq[i].lock);
}
}
if (q->rspq.desc) {
@ -1496,11 +1495,9 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
q->rspq.desc_map);
bus_dma_tag_destroy(q->rspq.desc_tag);
MTX_DESTROY(&q->rspq.lock);
}
if (mtx_initialized(&q->rspq.lock))
mtx_destroy(&q->rspq.lock);
bzero(q, sizeof(*q));
}
@ -1513,9 +1510,12 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
void
t3_free_sge_resources(adapter_t *sc)
{
int i;
int i, nqsets;
for (i = 0; i < SGE_QSETS; ++i)
for (nqsets = i = 0; i < (sc)->params.nports; i++)
nqsets += sc->port[i].nqsets;
for (i = 0; i < nqsets; ++i)
t3_free_qset(sc, &sc->sge.qs[i]);
}
@ -1548,13 +1548,17 @@ t3_sge_start(adapter_t *sc)
void
t3_sge_stop(adapter_t *sc)
{
int i;
int i, nqsets;
t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
if (sc->tq == NULL)
return;
for (i = 0; i < SGE_QSETS; ++i) {
for (nqsets = i = 0; i < (sc)->params.nports; i++)
nqsets += sc->port[i].nqsets;
for (i = 0; i < nqsets; ++i) {
struct sge_qset *qs = &sc->sge.qs[i];
taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_tsk);
@ -2010,7 +2014,9 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
mbufq_init(&q->txq[i].sendq);
q->txq[i].gen = 1;
q->txq[i].size = p->txq_size[i];
mtx_init(&q->txq[i].lock, "t3 txq lock", NULL, MTX_DEF);
snprintf(q->txq[i].lockbuf, TXQ_NAME_LEN, "t3 txq lock %d:%d:%d",
device_get_unit(sc->dev), irq_vec_idx, i);
MTX_INIT(&q->txq[i].lock, q->txq[i].lockbuf, NULL, MTX_DEF);
}
TASK_INIT(&q->txq[TXQ_OFLD].qresume_tsk, 0, restart_offloadq, q);
@ -2022,8 +2028,7 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
q->rspq.gen = 1;
q->rspq.size = p->rspq_size;
mtx_init(&q->rspq.lock, "t3 rspq lock", NULL, MTX_DEF);
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
@ -2088,6 +2093,10 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
}
}
snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
device_get_unit(sc->dev), irq_vec_idx);
MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
mtx_unlock(&sc->sge.reg_lock);
t3_update_qset_coalesce(q, p);
q->port = pi;