MFC r322248:

Fix for mlx4en(4) to properly call m_defrag().

The m_defrag() function can only defrag mbuf chains which have a valid
mbuf packet header. In r291699, when the mlx4en(4) driver was converted
to use BUSDMA(9), the call to m_defrag() was moved after the part of the
transmit routine that strips the header from the mbuf chain. This
effectively disabled the mbuf defrag mechanism, and such packets were
simply dropped.
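
A minimal sketch of the constraint (not the driver's code; the helper name
is made up for illustration): m_defrag() must be handed the head of a chain
that still carries its packet header, so any defragmentation has to happen
before header stripping.

	/*
	 * Illustrative only: compact an oversized TX chain while the
	 * packet header is still attached.  m_defrag() expects M_PKTHDR
	 * on the first mbuf, so it must run before any header stripping.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/mbuf.h>

	static struct mbuf *
	example_tx_defrag(struct mbuf *mb)
	{
		struct mbuf *m;

		KASSERT((mb->m_flags & M_PKTHDR) != 0,
		    ("mbuf chain lost its packet header"));

		m = m_defrag(mb, M_NOWAIT);
		if (m == NULL) {
			/* Could not be compacted; count and drop it. */
			m_freem(mb);
			return (NULL);
		}
		return (m);
	}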

This patch removes the stripping of empty mbufs from the chain and loads
the whole chain using BUSDMA(9). If busdma reports that there are no
segments, the DMA map is unloaded and the mbuf is freed right away,
because that means all of its data has been inlined into the TX ring.
Otherwise transmission proceeds as usual.
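
A condensed sketch of that load path, with illustrative function and
parameter names (the real code lives in mlx4_en_xmit() and also bumps the
per-ring counters):

	/*
	 * Illustrative sketch of the busdma load path described above.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/mbuf.h>
	#include <sys/bus.h>
	#include <machine/bus.h>

	static int
	example_load_tx_chain(bus_dma_tag_t tag, bus_dmamap_t map,
	    struct mbuf **mbp, bus_dma_segment_t *segs, int *nr_segs)
	{
		struct mbuf *mb = *mbp;
		struct mbuf *m;
		int err;

		err = bus_dmamap_load_mbuf_sg(tag, map, mb, segs, nr_segs,
		    BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many fragments; one m_defrag() pass, then reload. */
			m = m_defrag(mb, M_NOWAIT);
			if (m == NULL)
				return (ENOBUFS);	/* caller drops the chain */
			*mbp = mb = m;
			err = bus_dmamap_load_mbuf_sg(tag, map, mb, segs, nr_segs,
			    BUS_DMA_NOWAIT);
		}
		if (err != 0)
			return (err);

		if (*nr_segs != 0) {
			/* Make sure all mbuf data is written to RAM. */
			bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
		} else {
			/* Everything was inlined; release the map and the mbuf. */
			bus_dmamap_unload(tag, map);
			m_freem(mb);
			*mbp = NULL;
		}
		return (0);
	}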

Add a per-ring counter for the number of defrag attempts, and make sure
the oversized_packets counter gets zeroed while at it.

The counters are per-ring to avoid excessive cache misses in the
TX path.

Approved by:		re (kib)
Submitted by:		mjoras@
Differential Revision:	https://reviews.freebsd.org/D11683
Sponsored by:		Mellanox Technologies

commit c40fbda341 (parent f1e0e91693)
Author:	Hans Petter Selasky
Date:	2017-08-15 09:21:46 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/stable/10/; revision=322531

5 changed files with 27 additions and 15 deletions

@@ -2681,6 +2681,8 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
"TX checksum offloads");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD,
&priv->port_stats.defrag_attempts, "Oversized chains defragged");
/* Could strdup the names and add in a loop. This is simpler. */
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
@@ -2774,6 +2776,10 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
CTLFLAG_RD, &tx_ring->packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "tso_packets",
CTLFLAG_RD, &tx_ring->tso_packets, "TSO packets");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "defrag_attempts",
CTLFLAG_RD, &tx_ring->defrag_attempts, "Oversized chains defragged");
}
for (i = 0; i < priv->rx_ring_num; i++) {

@@ -224,11 +224,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
priv->port_stats.oversized_packets = 0;
priv->port_stats.tso_packets = 0;
priv->port_stats.defrag_attempts = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
priv->port_stats.tso_packets += priv->tx_ring[i]->tso_packets;
priv->port_stats.defrag_attempts += priv->tx_ring[i]->defrag_attempts;
}
/* RX Statistics */
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +

@@ -788,7 +788,7 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
num_pkts = DIV_ROUND_UP(payload_len, mss);
ring->bytes += payload_len + (num_pkts * ihs);
ring->packets += num_pkts;
priv->port_stats.tso_packets++;
ring->tso_packets++;
/* store pointer to inline header */
dseg_inline = dseg;
/* copy data inline */
@@ -809,20 +809,11 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
}
m_adj(mb, ihs);
/* trim off empty mbufs */
while (mb->m_len == 0) {
mb = m_free(mb);
/* check if all data has been inlined */
if (mb == NULL) {
nr_segs = 0;
goto skip_dma;
}
}
err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
mb, segs, &nr_segs, BUS_DMA_NOWAIT);
if (unlikely(err == EFBIG)) {
/* Too many mbuf fragments */
ring->defrag_attempts++;
m = m_defrag(mb, M_NOWAIT);
if (m == NULL) {
ring->oversized_packets++;
@@ -838,11 +829,18 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
ring->oversized_packets++;
goto tx_drop;
}
/* make sure all mbuf data is written to RAM */
bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
BUS_DMASYNC_PREWRITE);
/* If there were no errors and we didn't load anything, don't sync. */
if (nr_segs != 0) {
/* make sure all mbuf data is written to RAM */
bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
BUS_DMASYNC_PREWRITE);
} else {
/* All data was inlined, free the mbuf. */
bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
m_freem(mb);
mb = NULL;
}
skip_dma:
/* compute number of DS needed */
ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

@@ -278,6 +278,8 @@ struct mlx4_en_tx_ring {
unsigned long queue_stopped;
unsigned long oversized_packets;
unsigned long wake_queue;
unsigned long tso_packets;
unsigned long defrag_attempts;
struct mlx4_bf bf;
bool bf_enabled;
int hwtstamp_tx_type;

@@ -126,6 +126,7 @@ struct mlx4_en_port_stats {
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
unsigned long defrag_attempts;
};
struct mlx4_en_perf_stats {