Merge the vm_page hold and wire mechanisms.

The hold_count and wire_count fields of struct vm_page are separate
reference counters with similar semantics.  The remaining essential
differences are that holds are not counted as a reference with respect
to LRU, and holds have an implicit free-on-last-unhold semantic, whereas
vm_page_unwire() callers must explicitly determine whether to free the
page once the last reference to the page is released.

This change removes the KPIs which directly manipulate hold_count.
Functions such as vm_fault_quick_hold_pages() now return wired pages
instead.  Since r328977, the overhead of maintaining LRU for wired pages
is lower, and in many cases vm_fault_quick_hold_pages() callers would
swap holds for wirings on the returned pages anyway, so with this change
we remove a number of page lock acquisitions.
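
As a sketch of the resulting caller pattern (a hypothetical helper;
touch_user_pages and its arguments are illustrative, the KPIs are those
touched below), the per-page lock/wire/unhold loop that callers
previously needed disappears:

	static int
	touch_user_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
	    vm_prot_t prot)
	{
		vm_page_t ma[8];
		int count;

		count = vm_fault_quick_hold_pages(map, addr, len, prot,
		    ma, nitems(ma));
		if (count == -1)
			return (EFAULT);
		/* ... access the wired pages ... */
		vm_page_unhold_pages(ma, count); /* releases the wirings */
		return (0);
	}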

No functional change is intended.  __FreeBSD_version is bumped.

Reviewed by:	alc, kib
Discussed with:	jeff
Discussed with:	jhb, np (cxgbe)
Tested by:	pho (previous version)
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D19247
Mark Johnston 2019-07-08 19:46:20 +00:00
parent e64f3dee49
commit eeacb3b02f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=349846
32 changed files with 92 additions and 280 deletions

View File

@@ -381,7 +381,6 @@ MAN= accept_filter.9 \
vm_page_aflag.9 \
vm_page_free.9 \
vm_page_grab.9 \
vm_page_hold.9 \
vm_page_insert.9 \
vm_page_lookup.9 \
vm_page_rename.9 \
@@ -2248,7 +2247,6 @@ MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \
MLINKS+=vm_page_free.9 vm_page_free_toq.9 \
vm_page_free.9 vm_page_free_zero.9 \
vm_page_free.9 vm_page_try_to_free.9
MLINKS+=vm_page_hold.9 vm_page_unhold.9
MLINKS+=vm_page_insert.9 vm_page_remove.9
MLINKS+=vm_page_wire.9 vm_page_unwire.9
MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9

View File

@@ -1,75 +0,0 @@
.\"
.\" Copyright (C) 2001 Chad David <davidc@acns.ab.ca>. All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice(s), this list of conditions and the following disclaimer as
.\" the first lines of this file unmodified other than the possible
.\" addition of one or more copyright notices.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice(s), this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
.\" EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
.\" DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
.\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
.\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
.\" DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd July 13, 2001
.Dt VM_PAGE_HOLD 9
.Os
.Sh NAME
.Nm vm_page_hold ,
.Nm vm_page_unhold
.Nd "update a page's hold count"
.Sh SYNOPSIS
.In sys/param.h
.In vm/vm.h
.In vm/vm_page.h
.Ft void
.Fn vm_page_hold "vm_page_t m"
.Ft void
.Fn vm_page_unhold "vm_page_t m"
.Sh DESCRIPTION
The
.Fn vm_page_hold
function increases the hold count on a page.
This prevents the page daemon from freeing the page.
.Pp
.Fn vm_page_hold
should only be used for very temporary wiring of a page,
as that page will not be considered for paging or
reallocation for as long as its hold count is greater
than zero.
Also note that while wired pages are removed from whatever
queue they are on,
.Fn vm_page_hold
does not affect the location of the page.
If it is on a queue prior to the call, it will still
be there afterward.
.Pp
If the page needs to be held for a long period of time,
.Xr vm_page_wire 9
should be used.
.Pp
The
.Fn vm_page_unhold
function reduces the hold count on a page.
If the hold count is zero it is possible that the page will be freed by the
page daemon.
.Sh SEE ALSO
.Xr vm_page_unwire 9 ,
.Xr vm_page_wire 9
.Sh AUTHORS
This manual page was written by
.An Chad David Aq Mt davidc@acns.ab.ca .

View File

@@ -3035,7 +3035,7 @@ retry:
}
}
if (m != NULL)
vm_page_hold(m);
vm_page_wire(m);
}
PA_UNLOCK_COND(pa);
PMAP_UNLOCK(pmap);

View File

@@ -1003,7 +1003,7 @@ vm_gpa_release(void *cookie)
vm_page_t m = cookie;
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
}

View File

@@ -3438,9 +3438,8 @@ retry:
goto retry;
if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
vm_page_wire(m);
}
} else {
/*
* Note that we can't rely on the validity of the L1
@@ -3470,7 +3469,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
goto retry;
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
vm_page_wire(m);
}
}

View File

@@ -2002,7 +2002,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
goto retry;
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
vm_page_wire(m);
}
} else if (pte1_is_link(pte1)) {
pte2p = pmap_pte2(pmap, va);
@@ -2014,7 +2014,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, pa, &lockpa))
goto retry;
m = PHYS_TO_VM_PAGE(pa);
vm_page_hold(m);
vm_page_wire(m);
}
}
PA_UNLOCK_COND(lockpa);
@@ -6710,10 +6710,9 @@ pmap_pid_dump(int pid)
pa = pte2_pa(pte2);
m = PHYS_TO_VM_PAGE(pa);
printf("va: 0x%x, pa: 0x%x, h: %d, w:"
" %d, f: 0x%x", va, pa,
m->hold_count, m->wire_count,
m->flags);
printf("va: 0x%x, pa: 0x%x, w: %d, "
"f: 0x%x", va, pa,
m->wire_count, m->flags);
npte2++;
index++;
if (index >= 2) {
@@ -6823,8 +6822,8 @@ dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok)
printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2,
pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m);
if (m != NULL) {
printf(" v:%d h:%d w:%d f:0x%04X\n", m->valid,
m->hold_count, m->wire_count, m->flags);
printf(" v:%d w:%d f:0x%04X\n", m->valid,
m->wire_count, m->flags);
} else {
printf("\n");
}
@@ -6933,8 +6932,8 @@ dump_pt2tab(pmap_t pmap)
printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2,
pte2_class(pte2), !!(pte2 & PTE2_S), m);
if (m != NULL)
printf(" , h: %d, w: %d, f: 0x%04X pidx: %lld",
m->hold_count, m->wire_count, m->flags, m->pindex);
printf(" , w: %d, f: 0x%04X pidx: %lld",
m->wire_count, m->flags, m->pindex);
printf("\n");
}
}

View File

@@ -1100,7 +1100,7 @@ retry:
(tpte & ~ATTR_MASK) | off, &pa))
goto retry;
m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
vm_page_hold(m);
vm_page_wire(m);
}
}
PA_UNLOCK_COND(pa);

View File

@@ -455,7 +455,7 @@ page_unbusy(vm_page_t pp)
}
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
page_wire(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
@@ -482,9 +482,8 @@ page_hold(vnode_t *vp, int64_t start)
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_lock(pp);
vm_page_hold(pp);
vm_page_wire(pp);
vm_page_unlock(pp);
} else
pp = NULL;
break;
@@ -493,11 +492,11 @@ page_hold(vnode_t *vp, int64_t start)
}
static void
page_unhold(vm_page_t pp)
page_unwire(vm_page_t pp)
{
vm_page_lock(pp);
vm_page_unhold(pp);
vm_page_unwire(pp, PQ_ACTIVE);
vm_page_unlock(pp);
}
@@ -647,7 +646,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
if (pp = page_hold(vp, start)) {
if (pp = page_wire(vp, start)) {
struct sf_buf *sf;
caddr_t va;
@@ -660,7 +659,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
#endif
zfs_unmap_page(sf);
zfs_vmobject_wlock(obj);
page_unhold(pp);
page_unwire(pp);
} else {
zfs_vmobject_wunlock(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),

View File

@@ -198,23 +198,11 @@ linux_get_user_pages_internal(vm_map_t map, unsigned long start, int nr_pages,
vm_prot_t prot;
size_t len;
int count;
int i;
prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
len = ((size_t)nr_pages) << PAGE_SHIFT;
count = vm_fault_quick_hold_pages(map, start, len, prot, pages, nr_pages);
if (count == -1)
return (-EFAULT);
for (i = 0; i != nr_pages; i++) {
struct page *pg = pages[i];
vm_page_lock(pg);
vm_page_wire(pg);
vm_page_unhold(pg);
vm_page_unlock(pg);
}
return (nr_pages);
return (count == -1 ? -EFAULT : nr_pages);
}
int
@@ -244,11 +232,6 @@ __get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (*mp == NULL)
break;
vm_page_lock(*mp);
vm_page_wire(*mp);
vm_page_unhold(*mp);
vm_page_unlock(*mp);
if ((prot & VM_PROT_WRITE) != 0 &&
(*mp)->dirty != VM_PAGE_BITS_ALL) {
/*

View File

@@ -474,13 +474,6 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
return (-ENOMEM);
}
for (i = 0; i < actual_pages; i++) {
vm_page_lock(pages[i]);
vm_page_wire(pages[i]);
vm_page_unhold(pages[i]);
vm_page_unlock(pages[i]);
}
pagelist->length = count;
pagelist->type = type;
pagelist->offset = offset;

View File

@@ -1944,7 +1944,7 @@ aiotx_free_pgs(struct mbuf *m)
for (int i = 0; i < ext_pgs->npgs; i++) {
pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
vm_page_change_lock(pg, &mtx);
vm_page_unhold(pg);
vm_page_unwire(pg, PQ_ACTIVE);
}
if (mtx != NULL)
mtx_unlock(mtx);

View File

@@ -112,15 +112,12 @@ free_pageset(struct tom_data *td, struct pageset *ps)
if (ps->prsv.prsv_nppods > 0)
t4_free_page_pods(&ps->prsv);
if (ps->flags & PS_WIRED) {
for (i = 0; i < ps->npages; i++) {
p = ps->pages[i];
vm_page_lock(p);
vm_page_unwire(p, PQ_INACTIVE);
vm_page_unlock(p);
}
} else
vm_page_unhold_pages(ps->pages, ps->npages);
for (i = 0; i < ps->npages; i++) {
p = ps->pages[i];
vm_page_lock(p);
vm_page_unwire(p, PQ_INACTIVE);
vm_page_unlock(p);
}
mtx_lock(&ddp_orphan_pagesets_lock);
TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
@@ -150,7 +147,7 @@ recycle_pageset(struct toepcb *toep, struct pageset *ps)
{
DDP_ASSERT_LOCKED(toep);
if (!(toep->ddp.flags & DDP_DEAD) && ps->flags & PS_WIRED) {
if (!(toep->ddp.flags & DDP_DEAD)) {
KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
nitems(toep->ddp.db), ("too many wired pagesets"));
TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
@@ -1179,35 +1176,14 @@ t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid,
return (0);
}
static void
wire_pageset(struct pageset *ps)
{
vm_page_t p;
int i;
KASSERT(!(ps->flags & PS_WIRED), ("pageset already wired"));
for (i = 0; i < ps->npages; i++) {
p = ps->pages[i];
vm_page_lock(p);
vm_page_wire(p);
vm_page_unhold(p);
vm_page_unlock(p);
}
ps->flags |= PS_WIRED;
}
/*
* Prepare a pageset for DDP. This wires the pageset and sets up page
* pods.
* Prepare a pageset for DDP. This sets up page pods.
*/
static int
prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
{
struct tom_data *td = sc->tom_softc;
if (!(ps->flags & PS_WIRED))
wire_pageset(ps);
if (ps->prsv.prsv_nppods == 0 &&
!t4_alloc_page_pods_for_ps(&td->pr, ps)) {
return (0);

View File

@@ -124,8 +124,7 @@ struct pageset {
TAILQ_HEAD(pagesetq, pageset);
#define PS_WIRED 0x0001 /* Pages wired rather than held. */
#define PS_PPODS_WRITTEN 0x0002 /* Page pods written to the card. */
#define PS_PPODS_WRITTEN 0x0001 /* Page pods written to the card. */
struct ddp_buffer {
struct pageset *ps;

View File

@@ -1716,7 +1716,7 @@ retry:
}
}
if (m != NULL)
vm_page_hold(m);
vm_page_wire(m);
}
PA_UNLOCK_COND(pa);
PMAP_UNLOCK(pmap);

View File

@@ -1015,8 +1015,7 @@ exec_map_first_page(struct image_params *imgp)
vm_page_readahead_finish(ma[i]);
}
vm_page_lock(ma[0]);
vm_page_hold(ma[0]);
vm_page_activate(ma[0]);
vm_page_wire(ma[0]);
vm_page_unlock(ma[0]);
VM_OBJECT_WUNLOCK(object);
@@ -1036,7 +1035,7 @@ exec_unmap_first_page(struct image_params *imgp)
sf_buf_free(imgp->firstpage);
imgp->firstpage = NULL;
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
}
}

View File

@@ -307,7 +307,8 @@ proc_rwmem(struct proc *p, struct uio *uio)
* Release the page.
*/
vm_page_lock(m);
vm_page_unhold(m);
if (vm_page_unwire(m, PQ_ACTIVE) && m->object == NULL)
vm_page_free(m);
vm_page_unlock(m);
} while (error == 0 && uio->uio_resid > 0);

View File

@@ -207,11 +207,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
vm_page_xunbusy(m);
}
vm_page_lock(m);
vm_page_hold(m);
if (vm_page_active(m))
vm_page_reference(m);
else
vm_page_activate(m);
vm_page_wire(m);
vm_page_unlock(m);
VM_OBJECT_WUNLOCK(obj);
error = uiomove_fromphys(&m, offset, tlen, uio);
@@ -222,7 +218,7 @@ uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
VM_OBJECT_WUNLOCK(obj);
}
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
return (error);

View File

@@ -811,7 +811,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, pte_pa, &pa))
goto retry;
m = PHYS_TO_VM_PAGE(pte_pa);
vm_page_hold(m);
vm_page_wire(m);
}
}
PA_UNLOCK_COND(pa);

View File

@@ -166,10 +166,6 @@ zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
VM_PROT_WRITE, &pp, 1) < 0)
return (NULL);
vm_page_lock(pp);
vm_page_wire(pp);
vm_page_unhold(pp);
vm_page_unlock(pp);
sf = sf_buf_alloc(pp, SFB_NOWAIT);
if (sf == NULL) {
zbuf_page_free(pp);

View File

@@ -1275,7 +1275,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
goto retry;
m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
vm_page_hold(m);
vm_page_wire(m);
}
PA_UNLOCK_COND(pa);
PMAP_UNLOCK(pmap);

View File

@@ -1595,7 +1595,7 @@ retry:
pvo->pvo_pte.pa & LPTE_RPGN, &pa))
goto retry;
m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
vm_page_hold(m);
vm_page_wire(m);
}
PA_UNLOCK_COND(pa);
PMAP_UNLOCK(pmap);

View File

@@ -2951,7 +2951,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
goto retry;
m = PHYS_TO_VM_PAGE(PTE_PA(pte));
vm_page_hold(m);
vm_page_wire(m);
}
}

View File

@@ -884,7 +884,7 @@ retry:
if (vm_page_pa_tryrelock(pmap, phys, &pa))
goto retry;
m = PHYS_TO_VM_PAGE(phys);
vm_page_hold(m);
vm_page_wire(m);
}
}
PA_UNLOCK_COND(pa);

View File

@@ -859,7 +859,7 @@ retry:
m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
(void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va),
&pa);
vm_page_hold(m);
vm_page_wire(m);
} else {
tp = tsb_kvtotte(va);
if ((tp->tte_data & TD_V) == 0)
@@ -872,7 +872,7 @@ retry:
if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa))
goto retry;
m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
vm_page_hold(m);
vm_page_wire(m);
}
PA_UNLOCK_COND(pa);
PMAP_UNLOCK(pm);

View File

@@ -60,7 +60,7 @@
* in the range 5 to 9.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1300034 /* Master, propagated to newvers */
#define __FreeBSD_version 1300035 /* Master, propagated to newvers */
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

View File

@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
@@ -257,7 +258,7 @@ vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m)
if (m_hold != NULL) {
*m_hold = m;
vm_page_lock(m);
vm_page_hold(m);
vm_page_wire(m);
vm_page_unlock(m);
}
}
@@ -505,7 +506,7 @@ vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
vm_page_activate(&m[i]);
if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
*m_hold = &m[i];
vm_page_hold(&m[i]);
vm_page_wire(&m[i]);
}
vm_page_xunbusy_maybelocked(&m[i]);
}
@@ -563,6 +564,7 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
struct faultstate fs;
struct vnode *vp;
struct domainset *dset;
struct mtx *mtx;
vm_object_t next_object, retry_object;
vm_offset_t e_end, e_start;
vm_pindex_t retry_pindex;
@@ -1142,15 +1144,23 @@ readrest:
* We don't chase down the shadow chain
*/
fs.object == fs.first_object->backing_object) {
vm_page_lock(fs.m);
vm_page_dequeue(fs.m);
/*
* Keep the page wired to ensure that it is not
* freed by another thread, such as the page
* daemon, while it is disassociated from an
* object.
*/
mtx = NULL;
vm_page_change_lock(fs.m, &mtx);
vm_page_wire(fs.m);
(void)vm_page_remove(fs.m);
vm_page_unlock(fs.m);
vm_page_lock(fs.first_m);
vm_page_change_lock(fs.first_m, &mtx);
vm_page_replace_checked(fs.m, fs.first_object,
fs.first_pindex, fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock(fs.first_m);
vm_page_change_lock(fs.m, &mtx);
vm_page_unwire(fs.m, PQ_ACTIVE);
mtx_unlock(mtx);
vm_page_dirty(fs.m);
#if VM_NRESERVLEVEL > 0
/*
@@ -1327,7 +1337,7 @@ readrest:
vm_page_activate(fs.m);
if (m_hold != NULL) {
*m_hold = fs.m;
vm_page_hold(fs.m);
vm_page_wire(fs.m);
}
vm_page_unlock(fs.m);
vm_page_xunbusy(fs.m);
@@ -1600,7 +1610,9 @@ error:
for (mp = ma; mp < ma + count; mp++)
if (*mp != NULL) {
vm_page_lock(*mp);
vm_page_unhold(*mp);
if (vm_page_unwire(*mp, PQ_INACTIVE) &&
(*mp)->object == NULL)
vm_page_free(*mp);
vm_page_unlock(*mp);
}
return (-1);

View File

@@ -223,12 +223,14 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
VM_OBJECT_WLOCK(object);
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED);
if (m->valid != VM_PAGE_BITS_ALL) {
vm_page_xbusy(m);
rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_unwire(m, PQ_NONE);
vm_page_free(m);
vm_page_unlock(m);
m = NULL;
@@ -236,10 +238,6 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
}
vm_page_xunbusy(m);
}
vm_page_lock(m);
vm_page_hold(m);
vm_page_activate(m);
vm_page_unlock(m);
out:
VM_OBJECT_WUNLOCK(object);
return (m);
@@ -273,7 +271,7 @@ vm_imgact_unmap_page(struct sf_buf *sf)
sf_buf_free(sf);
sched_unpin();
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unwire(m, PQ_ACTIVE);
vm_page_unlock(m);
}

View File

@@ -1212,7 +1212,7 @@ next_page:
if (tm->valid != VM_PAGE_BITS_ALL)
goto next_pindex;
vm_page_lock(tm);
if (vm_page_held(tm)) {
if (vm_page_wired(tm)) {
vm_page_unlock(tm);
goto next_pindex;
}

View File

@@ -431,8 +431,7 @@ sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
/*
* Initialize a dummy page for use in scans of the specified paging queue.
* In principle, this function only needs to set the flag PG_MARKER.
* Nonetheless, it write busies and initializes the hold count to one as
* safety precautions.
* Nonetheless, it write busies the page as a safety precaution.
*/
static void
vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
@@ -443,7 +442,6 @@ vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
marker->aflags = aflags;
marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
marker->queue = queue;
marker->hold_count = 1;
}
static void
@@ -513,7 +511,6 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
m->object = NULL;
m->wire_count = 0;
m->busy_lock = VPB_UNBUSIED;
m->hold_count = 0;
m->flags = m->aflags = 0;
m->phys_addr = pa;
m->queue = PQ_NONE;
@@ -1095,31 +1092,6 @@ vm_page_change_lock(vm_page_t m, struct mtx **mtx)
mtx_lock(mtx1);
}
/*
* Keep page from being freed by the page daemon
* much of the same effect as wiring, except much lower
* overhead and should be used only for *very* temporary
* holding ("wiring").
*/
void
vm_page_hold(vm_page_t mem)
{
vm_page_lock_assert(mem, MA_OWNED);
mem->hold_count++;
}
void
vm_page_unhold(vm_page_t mem)
{
vm_page_lock_assert(mem, MA_OWNED);
KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
--mem->hold_count;
if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
vm_page_free_toq(mem);
}
/*
* vm_page_unhold_pages:
*
@@ -1133,7 +1105,8 @@ vm_page_unhold_pages(vm_page_t *ma, int count)
mtx = NULL;
for (; count != 0; count--) {
vm_page_change_lock(*ma, &mtx);
vm_page_unhold(*ma);
if (vm_page_unwire(*ma, PQ_ACTIVE) && (*ma)->object == NULL)
vm_page_free(*ma);
ma++;
}
if (mtx != NULL)
@@ -1595,7 +1568,7 @@ vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(mnew->object == NULL,
("vm_page_replace: page %p already in object", mnew));
KASSERT(mnew->queue == PQ_NONE,
KASSERT(mnew->queue == PQ_NONE || vm_page_wired(mnew),
("vm_page_replace: new page %p is on a paging queue", mnew));
/*
@@ -2143,7 +2116,7 @@ vm_page_alloc_check(vm_page_t m)
KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
("page %p has unexpected queue %d, flags %#x",
m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
KASSERT(!vm_page_held(m), ("page %p is held", m));
KASSERT(!vm_page_wired(m), ("page %p is wired", m));
KASSERT(!vm_page_busied(m), ("page %p is busy", m));
KASSERT(m->dirty == 0, ("page %p is dirty", m));
KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
@@ -2350,7 +2323,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
vm_page_change_lock(m, &m_mtx);
m_inc = 1;
retry:
if (vm_page_held(m))
if (vm_page_wired(m))
run_ext = 0;
#if VM_NRESERVLEVEL > 0
else if ((level = vm_reserv_level(m)) >= 0 &&
@@ -2378,13 +2351,11 @@ retry:
*/
VM_OBJECT_RUNLOCK(object);
goto retry;
} else if (vm_page_held(m)) {
} else if (vm_page_wired(m)) {
run_ext = 0;
goto unlock;
}
}
KASSERT((m->flags & PG_UNHOLDFREE) == 0,
("page %p is PG_UNHOLDFREE", m));
/* Don't care: PG_NODUMP, PG_ZERO. */
if (object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP &&
@@ -2520,7 +2491,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
*/
vm_page_change_lock(m, &m_mtx);
retry:
if (vm_page_held(m))
if (vm_page_wired(m))
error = EBUSY;
else if ((object = m->object) != NULL) {
/*
@@ -2537,13 +2508,11 @@ retry:
*/
VM_OBJECT_WUNLOCK(object);
goto retry;
} else if (vm_page_held(m)) {
} else if (vm_page_wired(m)) {
error = EBUSY;
goto unlock;
}
}
KASSERT((m->flags & PG_UNHOLDFREE) == 0,
("page %p is PG_UNHOLDFREE", m));
/* Don't care: PG_NODUMP, PG_ZERO. */
if (object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP &&
@@ -3476,13 +3445,6 @@ vm_page_free_prep(vm_page_t m)
if (vm_page_wired(m) != 0)
panic("vm_page_free_prep: freeing wired page %p", m);
if (m->hold_count != 0) {
m->flags &= ~PG_ZERO;
KASSERT((m->flags & PG_UNHOLDFREE) == 0,
("vm_page_free_prep: freeing PG_UNHOLDFREE page %p", m));
m->flags |= PG_UNHOLDFREE;
return (false);
}
/*
* Restore the default memory attribute to the page.
@@ -3799,7 +3761,7 @@ vm_page_try_to_free(vm_page_t m)
vm_page_assert_locked(m);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
if (m->dirty != 0 || vm_page_held(m) || vm_page_busied(m))
if (m->dirty != 0 || vm_page_wired(m) || vm_page_busied(m))
return (false);
if (m->object->ref_count != 0) {
pmap_remove_all(m);
@@ -4539,10 +4501,10 @@ DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
else
m = (vm_page_t)addr;
db_printf(
"page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
"page %p obj %p pidx 0x%jx phys 0x%jx q %d wire %d\n"
" af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
m->queue, m->wire_count, m->aflags, m->oflags,
m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */

View File

@@ -204,15 +204,14 @@ struct vm_page {
struct md_page md; /* machine dependent stuff */
u_int wire_count; /* wired down maps refs (P) */
volatile u_int busy_lock; /* busy owners lock */
uint16_t hold_count; /* page hold count (P) */
uint16_t flags; /* page PG_* flags (P) */
uint8_t order; /* index of the buddy queue (F) */
uint8_t pool; /* vm_phys freepool index (F) */
uint8_t aflags; /* access is atomic */
uint8_t oflags; /* page VPO_* flags (O) */
uint8_t queue; /* page queue index (Q) */
int8_t psind; /* pagesizes[] index (O) */
int8_t segind; /* vm_phys segment index (C) */
uint8_t order; /* index of the buddy queue (F) */
uint8_t pool; /* vm_phys freepool index (F) */
u_char act_count; /* page usage count (P) */
/* NOTE that these must support one bit per DEV_BSIZE in a page */
/* so, on normal X86 kernels, they must be at least 8 bits wide */
@@ -388,7 +387,6 @@ extern struct mtx_padalign pa_lock[];
#define PG_ZERO 0x0008 /* page is zeroed */
#define PG_MARKER 0x0010 /* special queue marker page */
#define PG_NODUMP 0x0080 /* don't include this page in a dump */
#define PG_UNHOLDFREE 0x0100 /* delayed free of a held page */
/*
* Misc constants.
@@ -516,8 +514,6 @@ malloc2vm_flags(int malloc_flags)
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
@@ -816,17 +812,10 @@ vm_page_in_laundry(vm_page_t m)
}
/*
* vm_page_held:
* vm_page_wired:
*
* Return true if a reference prevents the page from being reclaimable.
*/
static inline bool
vm_page_held(vm_page_t m)
{
return (m->hold_count > 0 || m->wire_count > 0);
}
static inline bool
vm_page_wired(vm_page_t m)
{

View File

@@ -334,7 +334,7 @@ vm_pageout_cluster(vm_page_t m)
pindex = m->pindex;
vm_page_assert_unbusied(m);
KASSERT(!vm_page_held(m), ("page %p is held", m));
KASSERT(!vm_page_wired(m), ("page %p is wired", m));
pmap_remove_write(m);
vm_page_unlock(m);
@@ -373,7 +373,7 @@ more:
break;
}
vm_page_lock(p);
if (vm_page_held(p) || !vm_page_in_laundry(p)) {
if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
vm_page_unlock(p);
ib = 0;
break;
@@ -399,7 +399,7 @@ more:
if (p->dirty == 0)
break;
vm_page_lock(p);
if (vm_page_held(p) || !vm_page_in_laundry(p)) {
if (vm_page_wired(p) || !vm_page_in_laundry(p)) {
vm_page_unlock(p);
break;
}
@@ -651,7 +651,7 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
* The page may have been busied or referenced while the object
* and page locks were released.
*/
if (vm_page_busied(m) || vm_page_held(m)) {
if (vm_page_busied(m) || vm_page_wired(m)) {
vm_page_unlock(m);
error = EBUSY;
goto unlock_all;
@@ -747,14 +747,10 @@ recheck:
}
/*
* Held pages are essentially stuck in the queue.
*
* Wired pages may not be freed. Complete their removal
* from the queue now to avoid needless revisits during
* future scans.
*/
if (m->hold_count != 0)
continue;
if (vm_page_wired(m)) {
vm_page_dequeue_deferred(m);
continue;
@@ -1419,18 +1415,10 @@ recheck:
goto reinsert;
/*
* Held pages are essentially stuck in the queue. So,
* they ought to be discounted from the inactive count.
* See the description of addl_page_shortage above.
*
* Wired pages may not be freed. Complete their removal
* from the queue now to avoid needless revisits during
* future scans.
*/
if (m->hold_count != 0) {
addl_page_shortage++;
goto reinsert;
}
if (vm_page_wired(m)) {
vm_page_dequeue_deferred(m);
continue;

View File

@@ -212,7 +212,7 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
continue;
VM_CNT_INC(v_pdpages);
vm_page_lock(p);
if (vm_page_held(p) ||
if (vm_page_wired(p) ||
!pmap_page_exists_quick(pmap, p)) {
vm_page_unlock(p);
continue;