Remove cxgb private lro implementation and switch to using system implementation.

Obtained from:	Chelsio Inc.
MFC after:	1 week
This commit is contained in:
Kip Macy 2008-08-12 00:27:32 +00:00
parent d77b331074
commit 25292deb42
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=181616
6 changed files with 76 additions and 478 deletions

View File

@ -515,7 +515,6 @@ dev/cs/if_cs_isa.c optional cs isa
dev/cs/if_cs_pccard.c optional cs pccard
dev/cxgb/cxgb_main.c optional cxgb pci
dev/cxgb/cxgb_offload.c optional cxgb pci
dev/cxgb/cxgb_lro.c optional cxgb pci
dev/cxgb/cxgb_sge.c optional cxgb pci
dev/cxgb/cxgb_multiq.c optional cxgb pci
dev/cxgb/common/cxgb_mc5.c optional cxgb pci

View File

@ -47,6 +47,7 @@ $FreeBSD$
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <netinet/tcp_lro.h>
#include <machine/bus.h>
#include <machine/resource.h>
@ -172,32 +173,9 @@ enum { TXQ_ETH = 0,
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
/* careful, the following are set on priv_flags and must not collide with
* IFF_ flags!
*/
enum {
LRO_ACTIVE = (1 << 8),
};
/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8
/*
 * Per-flow state for one in-progress LRO merge.  One session accumulates
 * consecutive in-order TCP segments of a single 4-tuple into `head'.
 */
struct t3_lro_session {
	struct mbuf *head;	/* merged packet; NULL when the slot is free */
	struct mbuf *tail;	/* presumably last mbuf in chain — not referenced by the visible code */
	uint32_t seq;		/* TCP sequence number expected from the next segment */
	uint16_t ip_len;	/* accumulated IP datagram length (host order) */
	uint16_t mss;		/* not referenced by the visible code */
	uint16_t vtag;		/* not referenced by the visible code */
	uint8_t npkts;		/* not referenced by the visible code */
};
struct lro_state {
unsigned short enabled;
unsigned short active_idx;
unsigned int nactive;
struct t3_lro_session sess[MAX_LRO_SES];
struct lro_ctrl ctrl;
};
#define RX_BUNDLE_SIZE 8
@ -316,12 +294,9 @@ enum {
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
SGE_PSTATS_LRO_QUEUED, /* # of LRO appended packets */
SGE_PSTATS_LRO_FLUSHED, /* # of LRO flushed packets */
SGE_PSTATS_LRO_X_STREAMS, /* # of exceeded LRO contexts */
};
#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)
#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)
#define QS_EXITING 0x1
#define QS_RUNNING 0x2
@ -587,10 +562,7 @@ void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);
void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);
void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);

View File

@ -1,397 +0,0 @@
/**************************************************************************
Copyright (c) 2007, Chelsio Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Neither the name of the Chelsio Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#else
#include <dev/cxgb/cxgb_include.h>
#endif
#include <machine/in_cksum.h>
/*
 * M_LRO marks an mbuf as the product of an LRO merge; provide a fallback
 * value when the system mbuf headers do not define it.
 */
#ifndef M_LRO
#define M_LRO 0x0200
#endif

#ifdef DEBUG
/*
 * Sanity-check an LRO head mbuf: it must carry M_PKTHDR, have non-zero
 * lengths, and be at least an Ethernet header long.  panic()s on
 * violation; compiled to a no-op unless DEBUG is defined.
 */
#define MBUF_HEADER_CHECK(m) do { \
	if ((m->m_len == 0) || (m->m_pkthdr.len == 0) \
	    || ((m->m_flags & M_PKTHDR) == 0)) \
		panic("lro_flush_session - mbuf len=%d pktlen=%d flags=0x%x\n", \
		    m->m_len, m->m_pkthdr.len, m->m_flags); \
	if ((m->m_flags & M_PKTHDR) == 0) \
		panic("first mbuf is not packet header - flags=0x%x\n", \
		    m->m_flags); \
	if ((m->m_len < ETHER_HDR_LEN) || (m->m_pkthdr.len < ETHER_HDR_LEN)) \
		panic("packet too small len=%d pktlen=%d\n", \
		    m->m_len, m->m_pkthdr.len);\
} while (0)
#else
#define MBUF_HEADER_CHECK(m)
#endif
#define IPH_OFFSET (2 + sizeof (struct cpl_rx_pkt) + ETHER_HDR_LEN)
#define LRO_SESSION_IDX_HINT_HASH(hash) (hash & (MAX_LRO_SES - 1))
#define LRO_IDX_INC(idx) idx = (idx + 1) & (MAX_LRO_SES - 1)
/*
 * Return non-zero when the incoming segment's TCP/IP 4-tuple (ports and
 * addresses) matches the headers stored at the front of the session's
 * head mbuf `m'.
 */
static __inline int
lro_match(struct mbuf *m, struct ip *ih, struct tcphdr *th)
{
	struct ip *hdr_ip;
	struct tcphdr *hdr_tcp;

	hdr_ip = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
	hdr_tcp = (struct tcphdr *)(hdr_ip + 1);

	if (th->th_sport != hdr_tcp->th_sport)
		return (0);
	if (th->th_dport != hdr_tcp->th_dport)
		return (0);
	if (ih->ip_src.s_addr != hdr_ip->ip_src.s_addr)
		return (0);
	return (ih->ip_dst.s_addr == hdr_ip->ip_dst.s_addr);
}
/*
 * Scan the session table starting at slot `idx' for a session whose
 * stored headers match the given IP/TCP headers.  Only `l->nactive'
 * occupied slots are counted; free slots are skipped without decrementing
 * the counter, and the index wraps modulo MAX_LRO_SES.
 *
 * NOTE(review): when no session matches, this returns the last occupied
 * slot examined rather than NULL (it returns NULL only when the table is
 * empty).  The caller appears to rely on lro_update_session() rejecting
 * the mismatched session — confirm.
 */
static __inline struct t3_lro_session *
lro_lookup(struct lro_state *l, int idx, struct ip *ih, struct tcphdr *th)
{
	struct t3_lro_session *s = NULL;
	int active = l->nactive;

	while (active) {
		s = &l->sess[idx];
		if (s->head) {
			if (lro_match(s->head, ih, th))
				break;
			active--;	/* one occupied slot examined */
		}
		LRO_IDX_INC(idx);	/* advance, wrapping at MAX_LRO_SES */
	}
	return (s);
}
/*
 * Decide whether a received packet is a candidate for LRO.  Requires:
 * a 4-tuple RSS hash (so the flow is fully classified), a hardware
 * checksum of 0xffff (validated), an IPv4 ethertype, and an IP header
 * with no options (ip_hl == 5).  The byte test on `cpl' inspects the
 * CPL_RX_PKT flags byte — presumably fragment/error bits; exact bit
 * meaning of 0x90/0x10 comes from the T3 CPL layout, confirm against
 * the hardware documentation.
 */
static __inline int
can_lro_packet(struct cpl_rx_pkt *cpl, unsigned int rss_hi)
{
	struct ether_header *eh = (struct ether_header *)(cpl + 1);
	struct ip *ih = (struct ip *)(eh + 1);

	/*
	 * XXX VLAN support?
	 */
	if (__predict_false(G_HASHTYPE(ntohl(rss_hi)) != RSS_HASH_4_TUPLE ||
	    (*((uint8_t *)cpl + 1) & 0x90) != 0x10 ||
	    cpl->csum != 0xffff || eh->ether_type != ntohs(ETHERTYPE_IP) ||
	    ih->ip_hl != (sizeof (*ih) >> 2))) {
		return 0;
	}
	return 1;
}
/*
 * Decide whether a TCP segment may be merged by LRO.  The flags byte
 * (offset 13 in the TCP header) must have ACK set and FIN/SYN/RST/URG/CWR
 * clear (mask 0xB7 — the PSH and ECE bits are not examined).  If options
 * are present they must be exactly the well-known timestamp layout:
 * NOP, NOP, TIMESTAMP, TCPOLEN_TIMESTAMP.
 */
static int
can_lro_tcpsegment(struct tcphdr *th)
{
	int optlen = (th->th_off << 2) - sizeof (*th);
	u8 flags = *((u8 *)th + 13);

	if (__predict_false((flags & 0xB7) != 0x10))
		return (0);

	if (optlen != 0) {
		uint32_t *opt = (u32 *)(th + 1);

		if (__predict_false(optlen != TCPOLEN_TSTAMP_APPA))
			return (0);
		if (__predict_false(*opt != ntohl((TCPOPT_NOP << 24) |
		    (TCPOPT_NOP << 16) |
		    (TCPOPT_TIMESTAMP << 8) |
		    TCPOLEN_TIMESTAMP)))
			return (0);
	}
	return (1);
}
/*
 * Install `m' as the head packet of session `s' and compute the TCP
 * sequence number expected from the next in-order segment (current
 * sequence plus payload length, i.e. IP length minus IP and TCP
 * header sizes).
 */
static __inline void
lro_new_session_init(struct t3_lro_session *s, struct mbuf *m)
{
	struct ip *iph;
	struct tcphdr *tcph;
	int datagram_len;

	iph = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
	tcph = (struct tcphdr *)(iph + 1);
	datagram_len = ntohs(iph->ip_len);

	DPRINTF("%s(s=%p, m=%p)\n", __FUNCTION__, s, m);

	s->head = m;
	MBUF_HEADER_CHECK(m);
	s->ip_len = datagram_len;
	s->seq = ntohl(tcph->th_seq) + datagram_len - sizeof(*iph) -
	    (tcph->th_off << 2);
}
/*
 * Deliver the merged packet of session `s' up the stack and either
 * re-seed the session with `m' (when a new packet is taking its place)
 * or free the slot.  Rewrites the accumulated IP total length and
 * recomputes the IP header checksum before delivery.
 */
static void
lro_flush_session(struct sge_qset *qs, struct t3_lro_session *s, struct mbuf *m)
{
	struct lro_state *l = &qs->lro;
	struct mbuf *sm = s->head;
	struct ip *ih = (struct ip *)(mtod(sm, uint8_t *) + IPH_OFFSET);

	DPRINTF("%s(qs=%p, s=%p, ", __FUNCTION__,
	    qs, s);
	if (m)
		DPRINTF("m=%p)\n", m);
	else
		DPRINTF("m=NULL)\n");

	/* Patch up the merged IP header: new total length, fresh checksum. */
	ih->ip_len = htons(s->ip_len);
	ih->ip_sum = 0;
	ih->ip_sum = in_cksum_hdr(ih);

	MBUF_HEADER_CHECK(sm);
	sm->m_flags |= M_LRO;	/* mark as an LRO-merged packet */
	t3_rx_eth(qs->port->adapter, &qs->rspq, sm, 2);

	if (m) {
		/* Slot is immediately reused for `m'; nactive unchanged. */
		s->head = m;
		lro_new_session_init(s, m);
	} else {
		s->head = NULL;
		l->nactive--;
	}

	qs->port_stats[SGE_PSTATS_LRO_FLUSHED]++;
}
/*
 * Start a new LRO session for packet `m'.  The slot is chosen by hashing
 * the RSS value; if that slot is taken, the table is probed linearly for
 * a free one.  When the table is completely full, the hashed slot is
 * flushed and immediately reused for `m' (that path returns early and
 * does not increment nactive, since lro_flush_session() already
 * re-seeded the slot without changing the count).
 */
static __inline struct t3_lro_session *
lro_new_session(struct sge_qset *qs, struct mbuf *m, uint32_t rss_hash)
{
	struct lro_state *l = &qs->lro;
	int idx = LRO_SESSION_IDX_HINT_HASH(rss_hash);
	struct t3_lro_session *s = &l->sess[idx];

	DPRINTF("%s(qs=%p, m=%p, rss_hash=0x%x)\n", __FUNCTION__,
	    qs, m, rss_hash);

	if (__predict_true(!s->head))
		goto done;

	/* Invariant check: the table can never hold more than MAX_LRO_SES. */
	if (l->nactive > MAX_LRO_SES)
		panic("MAX_LRO_PER_QSET exceeded");

	if (l->nactive == MAX_LRO_SES) {
		/* Table full: evict the hashed slot and take it over. */
		lro_flush_session(qs, s, m);
		qs->port_stats[SGE_PSTATS_LRO_X_STREAMS]++;
		return s;
	}

	/* Linear probe for a free slot; one must exist (nactive < MAX). */
	while (1) {
		LRO_IDX_INC(idx);
		s = &l->sess[idx];
		if (!s->head)
			break;
	}
done:
	lro_new_session_init(s, m);
	l->nactive++;

	return s;
}
/*
 * Try to merge segment `m' into session `s'.  Returns 0 on success and
 * -1 when the segment cannot be merged (VLAN mismatch, out-of-order
 * sequence, or unacceptable timestamp option), in which case the caller
 * flushes the session.
 *
 * NOTE(review): on the success path the trimmed mbuf `m' is never linked
 * onto the session's chain — the append logic is all inside the #if 0
 * blocks of ported Linux skb code below, and the "XXX FIX ME" comment
 * says as much.  Payload merging looks incomplete here (and `m' appears
 * to be neither delivered nor freed); this is presumably why the
 * implementation was gated off and later replaced by tcp_lro.
 */
static __inline int
lro_update_session(struct t3_lro_session *s, struct mbuf *m)
{
	struct mbuf *sm = s->head;
	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(sm, uint8_t *) + 2);
	struct cpl_rx_pkt *ncpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + 2);
	struct ip *nih = (struct ip *)(mtod(m, uint8_t *) + IPH_OFFSET);
	struct tcphdr *th, *nth = (struct tcphdr *)(nih + 1);
	uint32_t seq = ntohl(nth->th_seq);
	int plen, tcpiphlen, olen = (nth->th_off << 2) - sizeof (*nth);

	DPRINTF("%s(s=%p, m=%p)\n", __FUNCTION__, s, m);

	/* Refuse to merge across differing VLAN tags. */
	if (cpl->vlan_valid && cpl->vlan != ncpl->vlan) {
		return -1;
	}
	/* Only strictly in-order segments are merged. */
	if (__predict_false(seq != s->seq)) {
		DPRINTF("sequence mismatch\n");
		return -1;
	}

	MBUF_HEADER_CHECK(sm);

	th = (struct tcphdr *)(mtod(sm, uint8_t *) + IPH_OFFSET + sizeof (struct ip));

	if (olen) {
		/*
		 * Timestamp option merge: reject if the new segment's
		 * TSval went backwards (PAWS-style check) or its TSecr is
		 * zero; otherwise adopt the new TSval/TSecr into the
		 * merged header.
		 */
		uint32_t *ptr = (uint32_t *)(th + 1);
		uint32_t *nptr = (uint32_t *)(nth + 1);

		if (__predict_false(ntohl(*(ptr + 1)) > ntohl(*(nptr + 1)) ||
		    !*(nptr + 2))) {
			return -1;
		}
		*(ptr + 1) = *(nptr + 1);
		*(ptr + 2) = *(nptr + 2);
	}
	/* Carry the newest ACK number and window into the merged header. */
	th->th_ack = nth->th_ack;
	th->th_win = nth->th_win;

	/* Payload length = IP total length minus IP + TCP header bytes. */
	tcpiphlen = (nth->th_off << 2) + sizeof (*nih);
	plen = ntohs(nih->ip_len) - tcpiphlen;
	s->seq += plen;
	s->ip_len += plen;
	sm->m_pkthdr.len += plen;

	/*
	 * XXX FIX ME
	 *
	 *
	 */

#if 0
	/* XXX this I *do not* understand */
	if (plen > skb_shinfo(s->skb)->gso_size)
		skb_shinfo(s->skb)->gso_size = plen;
#endif
#if __FreeBSD_version > 700000
	/* Track the largest merged payload as the TSO segment size. */
	if (plen > sm->m_pkthdr.tso_segsz)
		sm->m_pkthdr.tso_segsz = plen;
#endif
	DPRINTF("m_adj(%d)\n", (int)(IPH_OFFSET + tcpiphlen));
	/* Strip CPL/Ethernet/IP/TCP headers, leaving only the payload. */
	m_adj(m, IPH_OFFSET + tcpiphlen);
#if 0
	if (__predict_false(!skb_shinfo(s->skb)->frag_list))
		skb_shinfo(s->skb)->frag_list = skb;

#endif

#if 0
	/*
	 * XXX we really need to be able to
	 * support vectors of buffers in FreeBSD
	 */
	int nr = skb_shinfo(s->skb)->nr_frags;
	skb_shinfo(s->skb)->frags[nr].page = frag->page;
	skb_shinfo(s->skb)->frags[nr].page_offset =
	    frag->page_offset + IPH_OFFSET + tcpiphlen;
	skb_shinfo(s->skb)->frags[nr].size = plen;
	skb_shinfo(s->skb)->nr_frags = ++nr;

#endif
	return (0);
}
/*
 * Entry point for received Ethernet packets when private LRO is in use.
 * Tries to merge the packet into an existing session, start a new
 * session, or — when the packet is not LRO-eligible — deliver it
 * directly via t3_rx_eth().
 *
 * NOTE(review): lro_lookup() may return a non-matching occupied session
 * when no true match exists; if can_lro_tcpsegment() then fails, the
 * `no_lro' path flushes that unrelated session.  Harmless (a flush is
 * always safe) but looks unintended — confirm.
 */
void
t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro)
{
	struct sge_qset *qs = rspq_to_qset(rq);
	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
	struct ether_header *eh = (struct ether_header *)(cpl + 1);
	struct ip *ih;
	struct tcphdr *th;
	struct t3_lro_session *s = NULL;

	if (lro == 0)
		goto no_lro;

	if (!can_lro_packet(cpl, rss_csum))
		goto no_lro;

	ih = (struct ip *)(eh + 1);
	th = (struct tcphdr *)(ih + 1);

	s = lro_lookup(&qs->lro,
	    LRO_SESSION_IDX_HINT_HASH(rss_hash), ih, th);

	if (__predict_false(!can_lro_tcpsegment(th))) {
		goto no_lro;
	} else if (__predict_false(!s)) {
		/* No session table entries active yet: start one. */
		s = lro_new_session(qs, m, rss_hash);
	} else {
		/* Merge; on failure flush the session and re-seed it with m. */
		if (lro_update_session(s, m)) {
			lro_flush_session(qs, s, m);
		}
#ifdef notyet
		if (__predict_false(s->head->m_pkthdr.len + pi->ifp->if_mtu > 65535)) {
			lro_flush_session(qs, s, NULL);
		}
#endif
	}
	qs->port_stats[SGE_PSTATS_LRO_QUEUED]++;
	return;
no_lro:
	if (s)
		lro_flush_session(qs, s, NULL);

	if (m->m_len == 0 || m->m_pkthdr.len == 0 || (m->m_flags & M_PKTHDR) == 0)
		DPRINTF("rx_eth_lro mbuf len=%d pktlen=%d flags=0x%x\n",
		    m->m_len, m->m_pkthdr.len, m->m_flags);

	/* Non-LRO path: hand the packet straight up the stack. */
	t3_rx_eth(adap, rq, m, ethpad);
}
/*
 * Flush every active LRO session for a queue set (called at the end of
 * response processing).  The loop terminates because lro_flush_session()
 * decrements state->nactive for each occupied slot it flushes.
 *
 * NOTE(review): `adap' is unused, and state->active_idx is read but
 * never written back — the starting index only affects flush order.
 */
void
t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state)
{
	unsigned int idx = state->active_idx;

	while (state->nactive) {
		struct t3_lro_session *s = &state->sess[idx];

		if (s->head)
			lro_flush_session(qs, s, NULL);
		LRO_IDX_INC(idx);
	}
}

View File

@ -903,9 +903,9 @@ cxgb_makedev(struct port_info *pi)
#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
@ -1946,6 +1946,24 @@ cxgb_set_mtu(struct port_info *p, int mtu)
return (error);
}
/*
 * Propagate the port's LRO enable/disable setting to every queue set
 * owned by the port.  Caller must hold the port lock.  Always returns 0.
 */
static int
cxgb_set_lro(struct port_info *p, int enabled)
{
	struct adapter *adp = p->adapter;
	int i;

	PORT_LOCK_ASSERT_OWNED(p);
	for (i = 0; i < p->nqsets; i++) {
		struct sge_qset *q = &adp->sge.qs[p->first_qset + i];

		/* Normalize to 0/1 before storing. */
		q->lro.enabled = (enabled != 0);
	}
	return (0);
}
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
@ -2031,6 +2049,12 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
error = EINVAL;
}
}
if (mask & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
/* Safe to do this even if cxgb_up not called yet */
cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
}
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;

View File

@ -195,7 +195,6 @@ static uint8_t flit_desc_map[] = {
};
static int lro_default = 0;
int cxgb_debug = 0;
static void sge_timer_cb(void *arg);
@ -1769,6 +1768,8 @@ t3_free_qset(adapter_t *sc, struct sge_qset *q)
MTX_DESTROY(&q->rspq.lock);
}
tcp_lro_free(&q->lro.ctrl);
bzero(q, sizeof(*q));
}
@ -2381,7 +2382,18 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
q->fl[1].zone = zone_jumbop;
q->fl[1].type = EXT_JUMBOP;
#endif
q->lro.enabled = lro_default;
/*
* We allocate and setup the lro_ctrl structure irrespective of whether
* lro is available and/or enabled.
*/
q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
ret = tcp_lro_init(&q->lro.ctrl);
if (ret) {
printf("error %d from tcp_lro_init\n", ret);
goto err;
}
q->lro.ctrl.ifp = pi->ifp;
mtx_lock_spin(&sc->sge.reg_lock);
ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
@ -2460,6 +2472,11 @@ err:
return (ret);
}
/*
* Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
* ethernet data. Hardware assistance with various checksums and any vlan tag
* will also be taken into account here.
*/
void
t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
{
@ -2497,8 +2514,6 @@ t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
m->m_len -= (sizeof(*cpl) + ethpad);
m->m_data += (sizeof(*cpl) + ethpad);
(*ifp->if_input)(ifp, m);
}
static void
@ -2784,7 +2799,8 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
struct rsp_desc *r = &rspq->desc[rspq->cidx];
int budget_left = budget;
unsigned int sleeping = 0;
int lro = qs->lro.enabled;
int lro_enabled = qs->lro.enabled;
struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
struct mbuf *offload_mbufs[RX_BUNDLE_SIZE];
int ngathered = 0;
#ifdef DEBUG
@ -2897,13 +2913,25 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
DPRINTF("received offload packet\n");
} else if (eth && eop) {
prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *));
prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES);
struct mbuf *m = rspq->rspq_mh.mh_head;
prefetch(mtod(m, uint8_t *));
prefetch(mtod(m, uint8_t *) + L1_CACHE_BYTES);
t3_rx_eth_lro(adap, rspq, rspq->rspq_mh.mh_head, ethpad,
rss_hash, rss_csum, lro);
t3_rx_eth(adap, rspq, m, ethpad);
if (lro_enabled && lro_ctrl->lro_cnt &&
(tcp_lro_rx(lro_ctrl, m, 0) == 0)) {
/* successfully queue'd for LRO */
} else {
/*
* LRO not enabled, packet unsuitable for LRO,
* or unable to queue. Pass it up right now in
* either case.
*/
struct ifnet *ifp = m->m_pkthdr.rcvif;
(*ifp->if_input)(ifp, m);
}
DPRINTF("received tunnel packet\n");
rspq->rspq_mh.mh_head = NULL;
rspq->rspq_mh.mh_head = NULL;
}
__refill_fl_lt(adap, &qs->fl[0], 32);
@ -2912,8 +2940,14 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
}
deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered);
t3_lro_flush(adap, qs, &qs->lro);
/* Flush LRO */
while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
tcp_lro_flush(lro_ctrl, queued);
}
if (sleeping)
check_ring_db(adap, qs, sleeping);
@ -3227,34 +3261,6 @@ retry_sbufops:
return (err);
}
/*
 * sysctl handler for dev.cxgb.N.enable_lro: reads/writes the lro.enabled
 * flag of every queue set on the adapter.
 *
 * NOTE(review): when LRO_WORKING is not defined the handler returns 0
 * before ever calling sysctl_handle_int(), so sysctl reads report no
 * value and writes are silently ignored (and the locals below are
 * unused).  Presumably a deliberate kill-switch for the broken private
 * LRO path — confirm.
 */
static int
t3_lro_enable(SYSCTL_HANDLER_ARGS)
{
	adapter_t *sc;
	int i, j, enabled, err, nqsets = 0;

#ifndef LRO_WORKING
	return (0);
#endif
	sc = arg1;
	enabled = sc->sge.qs[0].lro.enabled;
	err = sysctl_handle_int(oidp, &enabled, arg2, req);

	if (err != 0)
		return (err);
	if (enabled == sc->sge.qs[0].lro.enabled)
		return (0);

	/* Count the queue sets across all ports, then update them all. */
	for (i = 0; i < sc->params.nports; i++)
		for (j = 0; j < sc->port[i].nqsets; j++)
			nqsets++;

	for (i = 0; i < nqsets; i++)
		sc->sge.qs[i].lro.enabled = enabled;

	return (0);
}
static int
t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
{
@ -3316,12 +3322,6 @@ t3_add_attach_sysctls(adapter_t *sc)
"firmware_version",
CTLFLAG_RD, &sc->fw_version,
0, "firmware version");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
"enable_lro",
CTLTYPE_INT|CTLFLAG_RW, sc,
0, t3_lro_enable,
"I", "enable large receive offload");
SYSCTL_ADD_INT(ctx, children, OID_AUTO,
"hw_revision",
CTLFLAG_RD, &sc->params.rev,

View File

@ -6,7 +6,7 @@ CXGB = ${.CURDIR}/../../../dev/cxgb
KMOD= if_cxgb
SRCS= cxgb_mc5.c cxgb_vsc8211.c cxgb_ael1002.c cxgb_mv88e1xxx.c
SRCS+= cxgb_xgmac.c cxgb_vsc7323.c cxgb_t3_hw.c cxgb_main.c
SRCS+= cxgb_sge.c cxgb_lro.c cxgb_offload.c cxgb_tn1010.c
SRCS+= cxgb_sge.c cxgb_offload.c cxgb_tn1010.c
SRCS+= device_if.h bus_if.h pci_if.h opt_zero.h opt_sched.h
SRCS+= uipc_mvec.c cxgb_support.c cxgb_multiq.c