Actively limit the allocation of mbufs to NMBUFS/nmbufs and mbuf clusters to NMBCLUSTERS/nmbclusters/kern.ipc.nmbclusters.

Add a read-only sysctl kern.ipc.nmbufs matching kern.ipc.nmbclusters.

Submitted by:	Bosko Milekic <bmilekic@dsuper.net>
Committed by:	Mike Smith	1999-12-28 06:35:57 +00:00
commit 736e4b67ae (parent ecfa9802f0)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=55171
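
The new limit is exported read-only, so from userland it can be inspected with sysctl(8) or programmatically via sysctlbyname(3). A minimal sketch (not part of this commit, assuming only the FreeBSD libc):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int nmbufs;
		size_t len = sizeof(nmbufs);

		/* kern.ipc.nmbufs is CTLFLAG_RD: it can be read but never set. */
		if (sysctlbyname("kern.ipc.nmbufs", &nmbufs, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		printf("mbuf limit: %d\n", nmbufs);
		return (0);
	}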

sys/kern/uipc_mbuf.c

@@ -81,12 +81,14 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
 	   &mbuf_wait, 0, "");
 SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
 SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
-	   &nmbclusters, 0, "Maximum number of mbuf clusters avaliable");
+	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
+SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
+	   "Maximum number of mbufs available");
 #ifndef NMBCLUSTERS
 #define NMBCLUSTERS	(512 + MAXUSERS * 16)
 #endif
 TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
-TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);	/* XXX fixup? */
+TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
 static void m_reclaim __P((void));
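
With the defaults visible above, the two limits scale together: for example, with MAXUSERS = 32, NMBCLUSTERS = 512 + 32 * 16 = 1024, so nmbufs defaults to NMBCLUSTERS * 4 = 4096, i.e. four mbufs for every cluster. Either value can still be overridden at boot through the kern.ipc.nmbclusters and kern.ipc.nmbufs tunables; only the sysctl view is read-only.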
@@ -140,6 +142,14 @@ m_mballoc(nmb, how)
 	register int i;
 	int nbytes;
 
+	/*
+	 * If we've hit the mbuf limit, stop allocating from mb_map,
+	 * (or trying to) in order to avoid dipping into the section of
+	 * mb_map which we've "reserved" for clusters.
+	 */
+	if ((nmb + mbstat.m_mbufs) > nmbufs)
+		return (0);
+
 	/*
 	 * Once we run out of map space, it will be impossible to get
 	 * any more (nothing is ever freed back to the map)
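
The new test is a pure admission check: it refuses before any mb_map space is touched. Reduced to its shape, with hypothetical names (alloc_capped, current, limit standing in for m_mballoc(), mbstat.m_mbufs and nmbufs):

	#include <stdio.h>

	/* Illustrative only; the real m_mballoc() goes on to carve mb_map pages. */
	static int
	alloc_capped(int want, int current, int limit)
	{
		if ((want + current) > limit)
			return (0);	/* would dip into the clusters' share */
		return (1);		/* within the cap: proceed to allocate */
	}

	int
	main(void)
	{
		printf("%d\n", alloc_capped(8, 4090, 4096));	/* 0: 4098 > 4096 */
		printf("%d\n", alloc_capped(4, 4090, 4096));	/* 1: 4094 <= 4096 */
		return (0);
	}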
@@ -266,6 +276,16 @@ m_clalloc(ncl, how)
 	register int i;
 	int npg;
 
+	/*
+	 * If we've hit the mcluster number limit, stop allocating from
+	 * mb_map, (or trying to) in order to avoid dipping into the section
+	 * of mb_map which we've "reserved" for mbufs.
+	 */
+	if ((ncl + mbstat.m_clusters) > nmbclusters) {
+		mbstat.m_drops++;
+		return (0);
+	}
+
 	/*
 	 * Once we run out of map space, it will be impossible
 	 * to get any more (nothing is ever freed back to the
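
Note the asymmetry between the two guards: the cluster path charges a refusal to mbstat.m_drops before bailing out, while the mbuf path simply returns 0, presumably leaving drop accounting to its callers. Either way, consumers observe the caps exactly as they have always observed map exhaustion: the allocation macros yield a nil mbuf. A hypothetical kernel-side caller, sketched against the MGET/MCLGET interface of this era (not taken from this commit):

	#include <sys/param.h>
	#include <sys/mbuf.h>
	#include <sys/errno.h>

	/* Hypothetical consumer, for illustration only. */
	static int
	grab_cluster(struct mbuf **mp)
	{
		struct mbuf *m;

		MGET(m, M_DONTWAIT, MT_DATA);	/* NULL once the nmbufs cap bites */
		if (m == NULL)
			return (ENOBUFS);
		MCLGET(m, M_DONTWAIT);		/* no M_EXT once nmbclusters bites */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
		*mp = m;
		return (0);
	}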