e1000: Style txrx

Fix up indentation and reflow long lines.

MFC after:	3 days
Sponsored by:	BBOX.io
Kevin Bowling 2024-11-24 00:39:43 -07:00
parent c7fb7b5d9f
commit 7390daf87c
3 changed files with 62 additions and 40 deletions

View File

@@ -108,16 +108,19 @@ em_dump_rs(struct e1000_softc *sc)
cur = txr->tx_rsq[rs_cidx];
status = txr->tx_base[cur].upper.fields.status;
if (!(status & E1000_TXD_STAT_DD))
printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
printf("qid[%d]->tx_rsq[%d]: %d clear ",
qid, rs_cidx, cur);
} else {
rs_cidx = (rs_cidx-1)&(ntxd-1);
cur = txr->tx_rsq[rs_cidx];
printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ",
qid, rs_cidx, cur);
}
printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed,
txr->tx_rs_pidx);
for (i = 0; i < ntxd; i++) {
-if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD)
+if (txr->tx_base[i].upper.fields.status &
+E1000_TXD_STAT_DD)
printf("%d set ", i);
}
printf("\n");
@@ -143,8 +146,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
*txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
-E1000_TXD_DTYP_D | /* Data descr type */
-E1000_TXD_CMD_TSE); /* Do TSE on this packet */
+E1000_TXD_DTYP_D | /* Data descr type */
+E1000_TXD_CMD_TSE); /* Do TSE on this packet */
cur = pi->ipi_pidx;
TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
@@ -157,7 +160,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
switch(pi->ipi_etype) {
case ETHERTYPE_IP:
/* IP and/or TCP header checksum calculation and insertion. */
-*txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+*txd_upper =
+(E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
TXD->lower_setup.ip_fields.ipcse =
htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
@@ -183,7 +187,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
TXD->upper_setup.tcp_fields.tucse = 0;
TXD->upper_setup.tcp_fields.tucso =
-pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);
+pi->ipi_ehdrlen + pi->ipi_ip_hlen +
+offsetof(struct tcphdr, th_sum);
/*
* Payload size per packet w/o any headers.
@@ -211,8 +216,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
if (++cur == scctx->isc_ntxd[0]) {
cur = 0;
}
DPRINTF(iflib_get_dev(sc->ctx), "%s: pidx: %d cur: %d\n", __FUNCTION__,
pi->ipi_pidx, cur);
DPRINTF(iflib_get_dev(sc->ctx), "%s: pidx: %d cur: %d\n",
__FUNCTION__, pi->ipi_pidx, cur);
return (cur);
}
@@ -277,8 +282,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
* ipcse - End offset for header checksum calculation.
* ipcso - Offset of place to put the checksum.
*
-* We set ipcsX values regardless of IP version to work around HW issues
-* and ipcse must be 0 for IPv6 per "PCIe GbE SDM 2.5" page 61.
+* We set ipcsX values regardless of IP version to work around HW
+* issues and ipcse must be 0 for IPv6 per "PCIe GbE SDM 2.5" page 61.
* IXSM controls whether it's inserted.
*/
TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
@@ -296,7 +301,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
* tucse - End offset for payload checksum calculation.
* tucso - Offset of place to put the checksum.
*/
-if (csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) {
+if (csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP |
+CSUM_IP6_UDP)) {
uint8_t tucso;
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
@@ -326,7 +332,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
cur = 0;
}
DPRINTF(iflib_get_dev(sc->ctx),
"checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n",
"checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x"
" hdr_len=%d cmd=%x\n",
csum_flags, *txd_upper, *txd_lower, hdr_len, cmd);
return (cur);
}
@@ -372,7 +379,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = em_tso_setup(sc, pi, &txd_upper, &txd_lower);
tso_desc = true;
} else if (csum_flags & EM_CSUM_OFFLOAD) {
-i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower);
+i = em_transmit_checksum_setup(sc, pi, &txd_upper,
+&txd_lower);
}
if (pi->ipi_mflags & M_VLANTAG) {
@@ -414,7 +422,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
/* Now make the sentinel */
ctxd = &txr->tx_base[i];
ctxd->buffer_addr = htole64(seg_addr + seg_len);
-ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
+ctxd->lower.data =
+htole32(cmd | txd_lower | TSO_WORKAROUND);
ctxd->upper.data = htole32(txd_upper);
pidx_last = i;
if (++i == scctx->isc_ntxd[0])
@@ -429,7 +438,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
pidx_last = i;
if (++i == scctx->isc_ntxd[0])
i = 0;
DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n",
DPRINTF(iflib_get_dev(sc->ctx),
"pidx_last=%d i=%d ntxd[0]=%d\n",
pidx_last, i, scctx->isc_ntxd[0]);
}
}
@@ -449,7 +459,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
}
ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags);
DPRINTF(iflib_get_dev(sc->ctx),
"tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
"tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n",
first, pidx_last, i);
pi->ipi_new_pidx = i;
/* Sent data accounting for AIM */
@@ -508,8 +519,8 @@ em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
delta += ntxd;
MPASS(delta > 0);
DPRINTF(iflib_get_dev(sc->ctx),
"%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
__FUNCTION__, prev, cur, clear, delta);
"%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
__FUNCTION__, prev, cur, clear, delta);
processed += delta;
prev = cur;
@@ -699,7 +710,8 @@ lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
if (scctx->isc_capenable & IFCAP_VLAN_HWTAGGING &&
status & E1000_RXD_STAT_VP) {
-ri->iri_vtag = le16toh(rxd->special & E1000_RXD_SPC_VLAN_MASK);
+ri->iri_vtag =
+le16toh(rxd->special & E1000_RXD_SPC_VLAN_MASK);
ri->iri_flags |= M_VLANTAG;
}
@@ -789,7 +801,8 @@ em_receive_checksum(uint16_t status, uint8_t errors, if_rxd_info_t ri)
return;
/* If there is a layer 3 or 4 error we are done */
-if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
+if (__predict_false(errors & (E1000_RXD_ERR_IPE |
+E1000_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */

View File

@@ -3188,7 +3188,8 @@ em_reset(if_ctx_t ctx)
* response (Rx) to Ethernet PAUSE frames.
* - High water mark should allow for at least two frames to be
* received after sending an XOFF.
-* - Low water mark works best when it is very near the high water mark
+* - Low water mark works best when it is very near the high water
+*   mark.
* This allows the receiver to restart by sending XON when it has
* drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There

View File

@@ -102,14 +102,15 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
break;
default:
panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
-__func__, ntohs(pi->ipi_etype));
+__func__, ntohs(pi->ipi_etype));
break;
}
-TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];
/* This is used in the transmit desc in encap */
-paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
+pi->ipi_tcp_hlen;
/* VLAN MACLEN IPLEN */
if (pi->ipi_mflags & M_VLANTAG) {
@@ -147,8 +148,8 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
*
**********************************************************************/
static int
-igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
-uint32_t *olinfo_status)
+igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
+uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
struct e1000_adv_tx_context_desc *TXD;
struct e1000_softc *sc = txr->sc;
@@ -164,7 +165,7 @@ igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;
/* Now ready a context descriptor */
-TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];
/*
** In advanced descriptors the vlan tag must
@@ -246,8 +247,8 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
pidx_last = olinfo_status = 0;
/* Basic descriptor defines */
-cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
-E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);
+cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
+E1000_ADVTXD_DCMD_DEXT);
if (pi->ipi_mflags & M_VLANTAG)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -300,9 +301,9 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
-struct e1000_softc *sc = arg;
-struct em_tx_queue *que = &sc->tx_queues[txqid];
-struct tx_ring *txr = &que->txr;
+struct e1000_softc *sc = arg;
+struct em_tx_queue *que = &sc->tx_queues[txqid];
+struct tx_ring *txr = &que->txr;
E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}
@@ -351,7 +352,8 @@ igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
if (rs_cidx == txr->tx_rs_pidx)
break;
cur = txr->tx_rsq[rs_cidx];
-status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+status = ((union e1000_adv_tx_desc *)
+&txr->tx_base[cur])->wb.status;
} while ((status & E1000_TXD_STAT_DD));
txr->tx_rs_cidx = rs_cidx;
@@ -387,7 +389,8 @@ igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
}
static void
-igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
+igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+qidx_t pidx)
{
struct e1000_softc *sc = arg;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
@@ -453,7 +456,8 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
MPASS ((staterr & E1000_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
-ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
+ptype =
+le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
ri->iri_len += len;
rxr->rx_bytes += ri->iri_len;
@@ -462,7 +466,8 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
/* Make sure bad packets are discarded */
-if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
+if (eop &&
+((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
sc->dropped_pkts++;
++rxr->rx_discarded;
return (EBADMSG);
@@ -524,7 +529,8 @@ igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
return;
/* If there is a layer 3 or 4 error we are done */
-if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
+if (__predict_false(errors &
+(E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
@@ -535,11 +541,13 @@ igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
if (__predict_true(status &
(E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
/* SCTP header present */
-if (__predict_false((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+if (__predict_false(
+(ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
(ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
-ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ri->iri_csum_flags |=
+CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}