This set of commits to the VM system does the following, and contains
contributions or ideas from Stephen McKay <syssgm@devetir.qld.gov.au>,
Alan Cox <alc@cs.rice.edu>, David Greenman <davidg@freebsd.org> and me:

More usage of the TAILQ macros.  Additional minor fix to queue.h.
Performance enhancements to the pageout daemon.
	Addition of a wait in the case that the pageout daemon
	has to run immediately.
	Slightly modify the pageout algorithm.
Significant revamp of the pmap/fork code:
	1) PTE's and UPAGES's are NO LONGER in the process's map.
	2) PTE's and UPAGES's reside in their own objects.
	3) TOTAL elimination of recursive page table pagefaults.
	4) The page directory now resides in the PTE object.
	5) Implemented pmap_copy, thereby speeding up fork time.
	6) Changed the pv entries so that the head is a pointer
	   and not an entire entry.
	7) Significant cleanup of pmap_protect, and pmap_remove.
	8) Removed significant amounts of machine dependent fork code
	   from vm_glue.  Pushed much of that code into the machine
	   dependent pmap module.
	9) Support more completely the reuse of already zeroed pages
	   (page table pages and page directories) as being already
	   zeroed.
Performance and code cleanups in vm_map:
	1) Improved and simplified allocation of map entries.
	2) Improved vm_map_copy code.
	3) Corrected some minor problems in the simplify code.
Implemented splvm (combo of splbio and splimp.)  The VM code now
	seldom uses splhigh.
Improved the speed of and simplified kmem_malloc.
Minor mod to vm_fault to avoid using pre-zeroed pages in the case of
	objects with backing objects along with the already existent
	condition of having a vnode.  (If there is a backing object,
	there will likely be a COW...  With a COW, it isn't necessary
	to start with a pre-zeroed page.)
Minor reorg of source to perhaps improve locality of ref.
commit b18bfc3da7
parent b667dc9a51

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=15809
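
A note on the dominant pattern in the diff below: most of the mechanical
changes replace direct access to the queue fields (tqh_first, tqe_next)
with the TAILQ accessor macros from <sys/queue.h>, per the commit
message's "More usage of the TAILQ macros" item.  A minimal,
self-contained sketch of the before/after idiom — the struct item type
here is hypothetical, not something from the tree:

	#include <sys/queue.h>

	struct item {
		int value;
		TAILQ_ENTRY(item) link;	/* provides tqe_next/tqe_prev */
	};
	TAILQ_HEAD(itemhead, item);

	static int
	sum(struct itemhead *head)
	{
		struct item *ip;
		int total = 0;

		/*
		 * The old style reached into the fields directly:
		 *	for (ip = head->tqh_first; ip; ip = ip->link.tqe_next)
		 * The converted style uses the accessor macros:
		 */
		for (ip = TAILQ_FIRST(head); ip != NULL;
		    ip = TAILQ_NEXT(ip, link))
			total += ip->value;
		return (total);
	}

The macro form insulates callers from the layout of the linkage fields,
which is what makes fixes like the queue.h tqe_next typo repair below
possible without touching every traversal.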
sys/i386/i386/machdep.c:

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
- *	$Id: machdep.c,v 1.189 1996/05/03 21:00:53 phk Exp $
+ *	$Id: machdep.c,v 1.190 1996/05/10 19:28:44 wollman Exp $
  */

 #include "npx.h"
@@ -378,6 +378,8 @@ again:
 	    (nswbuf*MAXPHYS) + pager_map_size, TRUE);
 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 	    (16*ARG_MAX), TRUE);
+	exech_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+	    (32*ARG_MAX), TRUE);
 	u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 	    (maxproc*UPAGES*PAGE_SIZE), FALSE);
sys/i386/i386/pmap.c (1379 lines changed):

 File diff suppressed because it is too large.
sys/i386/i386/trap.c:

@@ -35,7 +35,7 @@
  * SUCH DAMAGE.
  *
  *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
- *	$Id: trap.c,v 1.74 1996/03/27 17:33:39 bde Exp $
+ *	$Id: trap.c,v 1.75 1996/03/28 05:40:57 dyson Exp $
  */

 /*
@@ -805,25 +805,11 @@ int trapwrite(addr)

-	v = trunc_page(vtopte(va));
-
-	/*
-	 * wire the pte page
-	 */
-	if (va < USRSTACK) {
-		vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
-	}
-
 	/*
 	 * fault the data page
 	 */
 	rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);

-	/*
-	 * unwire the pte page
-	 */
-	if (va < USRSTACK) {
-		vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
-	}
-
 	--p->p_lock;

 	if (rv != KERN_SUCCESS)
sys/i386/i386/vm_machdep.c:

@@ -38,7 +38,7 @@
  *
  *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
- *	$Id: vm_machdep.c,v 1.61 1996/05/02 10:43:06 phk Exp $
+ *	$Id: vm_machdep.c,v 1.62 1996/05/02 14:19:55 phk Exp $
  */

 #include "npx.h"
@@ -862,7 +862,7 @@ int
 vm_page_zero_idle() {
 	vm_page_t m;
 	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
-	    (m = vm_page_queue_free.tqh_first)) {
+	    (m = TAILQ_FIRST(&vm_page_queue_free))) {
 		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
 		enable_intr();
 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
sys/i386/include/pmap.h:

@@ -42,7 +42,7 @@
  *
  *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
  *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
- *	$Id: pmap.h,v 1.37 1996/05/02 14:20:04 phk Exp $
+ *	$Id: pmap.h,v 1.38 1996/05/02 22:25:18 phk Exp $
  */

 #ifndef _MACHINE_PMAP_H_
@@ -69,6 +69,7 @@

 /* Our various interpretations of the above */
 #define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
+#define PG_MANAGED	PG_AVAIL2
 #define PG_FRAME	(~PAGE_MASK)
 #define PG_PROT		(PG_RW|PG_U)	/* all protection bits . */
 #define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */
@@ -87,12 +88,8 @@
 #define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))

 #ifndef NKPT
-#if 0
-#define NKPT			26	/* actual number of kernel page tables */
-#else
 #define NKPT			9	/* actual number of kernel page tables */
-#endif
 #endif
 #ifndef NKPDE
 #define NKPDE			63	/* addressable number of page tables/pde's */
 #endif
@@ -119,6 +116,7 @@
 typedef unsigned int *pd_entry_t;
 typedef unsigned int *pt_entry_t;
 struct vm_map;
+struct vm_object;

 #define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
 #define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */
@@ -168,6 +166,7 @@ pmap_kextract(vm_offset_t va)

 struct pmap {
 	pd_entry_t		*pm_pdir;	/* KVA of page directory */
+	vm_object_t		pm_pteobj;	/* Container for pte's */
 	short			pm_dref;	/* page directory ref count */
 	short			pm_count;	/* pmap reference count */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
@@ -203,7 +202,7 @@ extern pt_entry_t *CMAP1;
 extern vm_offset_t avail_end;
 extern vm_offset_t avail_start;
 extern vm_offset_t phys_avail[];
-extern pv_entry_t pv_table;	/* array of entries, one per page */
+extern pv_entry_t *pv_table;	/* array of entries, one per page */
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;

@@ -217,8 +216,8 @@ struct pcb;
 void	pmap_bootstrap __P(( vm_offset_t, vm_offset_t));
 pmap_t	pmap_kernel __P((void));
 void	*pmap_mapdev __P((vm_offset_t, vm_size_t));
-pt_entry_t * __pure pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
-void	pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
+unsigned * __pure pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
+int	pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
 vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));

 #endif /* KERNEL */
sys/i386/include/spl.h:

@@ -30,7 +30,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: spl.h,v 1.12 1995/10/30 17:01:37 bde Exp $
+ *	$Id: spl.h,v 1.13 1996/02/07 21:52:57 wollman Exp $
  */

 #ifndef _MACHINE_IPL_H_
@@ -125,6 +125,7 @@ GENSPL(splsoftclock, cpl = SWI_CLOCK_MASK)
 GENSPL(splsofttty, cpl |= SWI_TTY_MASK)
 GENSPL(splstatclock, cpl |= stat_imask)
 GENSPL(spltty, cpl |= tty_imask)
+GENSPL(splvm, cpl |= net_imask | bio_imask)

 static __inline void
 spl0(void)
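
The splvm addition above is what lets the VM code below drop most of its
splhigh() calls.  A sketch of the critical-section discipline, assuming
the usual spl()/splx() pairing (illustrative only; the queue and field
names are taken from the vm_machdep.c hunk earlier, and the helper is
hypothetical):

	int
	try_grab_free_page(void)	/* hypothetical helper */
	{
		vm_page_t m;
		int s;

		s = splvm();		/* block bio+net interrupts only */
		m = TAILQ_FIRST(&vm_page_queue_free);
		if (m != NULL)
			TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		splx(s);		/* restore the previous level */
		return (m != NULL);
	}

Because splvm() masks only the interrupt classes that can touch VM data
structures (block I/O and network), the machine keeps taking clock and
tty interrupts that splhigh() would have blocked.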
sys/kern/kern_exec.c:

@@ -23,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	$Id: kern_exec.c,v 1.39 1996/04/29 15:07:59 smpatel Exp $
+ *	$Id: kern_exec.c,v 1.40 1996/05/01 02:42:47 bde Exp $
  */

 #include <sys/param.h>
@@ -171,7 +171,7 @@ interpret:
 	 * Map the image header (first page) of the file into
 	 * kernel address space
 	 */
-	error = vm_mmap(kernel_map,		/* map */
+	error = vm_mmap(exech_map,		/* map */
 			(vm_offset_t *)&imgp->image_header, /* address */
 			PAGE_SIZE,		/* size */
 			VM_PROT_READ,		/* protection */
@@ -206,7 +206,7 @@ interpret:
 		/* free old vnode and name buffer */
 		vrele(ndp->ni_vp);
 		FREE(ndp->ni_cnd.cn_pnbuf, M_NAMEI);
-		if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+		if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
 		    (vm_offset_t)imgp->image_header + PAGE_SIZE))
 			panic("execve: header dealloc failed (1)");

@@ -319,7 +319,7 @@ interpret:
 	 * free various allocated resources
 	 */
 	kmem_free(exec_map, (vm_offset_t)imgp->stringbase, ARG_MAX);
-	if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+	if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
 	    (vm_offset_t)imgp->image_header + PAGE_SIZE))
 		panic("execve: header dealloc failed (2)");
 	vrele(ndp->ni_vp);
@@ -331,7 +331,7 @@ exec_fail_dealloc:
 	if (imgp->stringbase != NULL)
 		kmem_free(exec_map, (vm_offset_t)imgp->stringbase, ARG_MAX);
 	if (imgp->image_header && imgp->image_header != (char *)-1)
-		if (vm_map_remove(kernel_map, (vm_offset_t)imgp->image_header,
+		if (vm_map_remove(exech_map, (vm_offset_t)imgp->image_header,
 		    (vm_offset_t)imgp->image_header + PAGE_SIZE))
 			panic("execve: header dealloc failed (3)");
 	if (ndp->ni_vp)
sys/kern/vfs_bio.c:

@@ -18,7 +18,7 @@
  * 5. Modifications may be freely made to this file if the above conditions
  *    are met.
  *
- * $Id: vfs_bio.c,v 1.88 1996/03/09 06:46:51 dyson Exp $
+ * $Id: vfs_bio.c,v 1.89 1996/05/03 21:01:26 phk Exp $
  */

 /*
@@ -509,7 +509,7 @@ brelse(struct buf * bp)
 	/* buffers with no memory */
 	if (bp->b_bufsize == 0) {
 		bp->b_qindex = QUEUE_EMPTY;
-		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
+		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
 		LIST_REMOVE(bp, b_hash);
 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
 		bp->b_dev = NODEV;
@@ -742,7 +742,7 @@ start:
 		goto trytofreespace;

 	/* can we constitute a new buffer? */
-	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
+	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
 		if (bp->b_qindex != QUEUE_EMPTY)
 			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
 			    bp->b_qindex);
@@ -756,11 +756,11 @@ trytofreespace:
 	 * This is desirable because file data is cached in the
 	 * VM/Buffer cache even if a buffer is freed.
 	 */
-	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
+	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
 		if (bp->b_qindex != QUEUE_AGE)
 			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
 			    bp->b_qindex);
-	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
+	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
 		if (bp->b_qindex != QUEUE_LRU)
 			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
 			    bp->b_qindex);
@@ -783,7 +783,7 @@ trytofreespace:
 	    (vmiospace < maxvmiobufspace)) {
 		--bp->b_usecount;
 		TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
-		if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
+		if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
 			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
 			goto start;
 		}
@@ -1498,9 +1498,9 @@ count_lock_queue()
 	struct buf *bp;

 	count = 0;
-	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
+	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
 	    bp != NULL;
-	    bp = bp->b_freelist.tqe_next)
+	    bp = TAILQ_NEXT(bp, b_freelist))
 		count++;
 	return (count);
 }
@@ -1663,7 +1663,6 @@ vfs_clean_pages(struct buf * bp)
 void
 vfs_bio_clrbuf(struct buf *bp) {
 	int i;
-	int remapbuffer = 0;
 	if( bp->b_flags & B_VMIO) {
 		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
 			int mask;
@@ -1691,14 +1690,12 @@ vfs_bio_clrbuf(struct buf *bp) {
 					bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
 				}
 			}
-			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
+			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
 		}
 		bp->b_resid = 0;
 	} else {
 		clrbuf(bp);
 	}
-	if (remapbuffer)
-		pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
 }

 /*
sys/sys/queue.h:

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)queue.h	8.5 (Berkeley) 8/20/94
- * $Id: queue.h,v 1.8 1996/03/31 03:21:45 gibbs Exp $
+ * $Id: queue.h,v 1.9 1996/04/08 07:51:57 phk Exp $
  */

 #ifndef _SYS_QUEUE_H_
@@ -268,7 +268,9 @@ struct { \

 #define TAILQ_LAST(head) ((head)->tqh_last)

-#define TAILQ_NEXT(elm, field) ((elm)->field.teq_next)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, field) ((elm)->field.tqe_prev)

 #define TAILQ_INIT(head) { \
 	(head)->tqh_first = NULL; \
sys/vm/device_pager.c:

@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
- *	$Id: device_pager.c,v 1.21 1996/03/09 06:54:41 dyson Exp $
+ *	$Id: device_pager.c,v 1.22 1996/05/03 21:01:45 phk Exp $
  */

 #include <sys/param.h>
@@ -182,7 +182,7 @@ dev_pager_dealloc(object)
 	/*
 	 * Free up our fake pages.
 	 */
-	while ((m = object->un_pager.devp.devp_pglist.tqh_first) != 0) {
+	while ((m = TAILQ_FIRST(&object->un_pager.devp.devp_pglist)) != 0) {
 		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
 		dev_pager_putfake(m);
 	}
@@ -265,14 +265,14 @@ dev_pager_getfake(paddr)
 	vm_page_t m;
 	int i;

-	if (dev_pager_fakelist.tqh_first == NULL) {
+	if (TAILQ_FIRST(&dev_pager_fakelist) == NULL) {
 		m = (vm_page_t) malloc(PAGE_SIZE * 2, M_VMPGDATA, M_WAITOK);
 		for (i = (PAGE_SIZE * 2) / sizeof(*m); i > 0; i--) {
 			TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
 			m++;
 		}
 	}
-	m = dev_pager_fakelist.tqh_first;
+	m = TAILQ_FIRST(&dev_pager_fakelist);
 	TAILQ_REMOVE(&dev_pager_fakelist, m, pageq);

 	m->flags = PG_BUSY | PG_FICTITIOUS;
sys/vm/swap_pager.c:

@@ -39,7 +39,7 @@
  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
  *
  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.64 1996/05/02 14:21:14 phk Exp $
+ * $Id: swap_pager.c,v 1.65 1996/05/03 21:01:47 phk Exp $
  */

 /*
@@ -530,7 +530,7 @@ swap_pager_reclaim()
 	/* for each pager queue */
 	for (k = 0; swp_qs[k]; k++) {

-		object = swp_qs[k]->tqh_first;
+		object = TAILQ_FIRST(swp_qs[k]);
 		while (object && (reclaimcount < MAXRECLAIM)) {

 			/*
@@ -555,7 +555,7 @@ swap_pager_reclaim()
 				}
 			}
 		}
-		object = object->pager_object_list.tqe_next;
+		object = TAILQ_NEXT(object, pager_object_list);
 		}
 	}

@@ -956,8 +956,8 @@ swap_pager_getpages(object, m, count, reqpage)

 	spc = NULL;	/* we might not use an spc data structure */

-	if ((count == 1) && (swap_pager_free.tqh_first != NULL)) {
-		spc = swap_pager_free.tqh_first;
+	if ((count == 1) && (TAILQ_FIRST(&swap_pager_free) != NULL)) {
+		spc = TAILQ_FIRST(&swap_pager_free);
 		TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
 		kva = spc->spc_kva;
 		bp = spc->spc_bp;
@@ -1263,9 +1263,9 @@ swap_pager_putpages(object, m, count, sync, rtvals)
 	/*
 	 * get a swap pager clean data structure, block until we get it
 	 */
-	if (swap_pager_free.tqh_first == NULL ||
-	    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
-	    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+	if (TAILQ_FIRST(&swap_pager_free) == NULL ||
+	    TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list) == NULL ||
+	    TAILQ_NEXT(TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list),spc_list) == NULL) {
 		s = splbio();
 		if (curproc == pageproc) {
retryfree:
@@ -1285,9 +1285,9 @@ retryfree:
 			 */
 			if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) {
 				swap_pager_sync();
-				if (swap_pager_free.tqh_first == NULL ||
-				    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
-				    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+				if (TAILQ_FIRST(&swap_pager_free) == NULL ||
+				    TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list) == NULL ||
+				    TAILQ_NEXT(TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list),spc_list) == NULL) {
 					splx(s);
 					return VM_PAGER_AGAIN;
 				}
@@ -1297,17 +1297,17 @@ retryfree:
 				 * the free swap control blocks.
 				 */
 				swap_pager_sync();
-				if (swap_pager_free.tqh_first == NULL ||
-				    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
-				    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+				if (TAILQ_FIRST(&swap_pager_free) == NULL ||
+				    TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list) == NULL ||
+				    TAILQ_NEXT(TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list),spc_list) == NULL) {
 					goto retryfree;
 				}
 			}
 		} else {
 			pagedaemon_wakeup();
-			while (swap_pager_free.tqh_first == NULL ||
-			    swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
-			    swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
+			while (TAILQ_FIRST(&swap_pager_free) == NULL ||
+			    TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list) == NULL ||
+			    TAILQ_NEXT(TAILQ_NEXT(TAILQ_FIRST(&swap_pager_free),spc_list),spc_list) == NULL) {
 				swap_pager_needflags |= SWAP_FREE_NEEDED;
 				tsleep(&swap_pager_free, PVM, "swpfre", 0);
 				pagedaemon_wakeup();
@@ -1315,7 +1315,7 @@ retryfree:
 		}
 		splx(s);
 	}
-	spc = swap_pager_free.tqh_first;
+	spc = TAILQ_FIRST(&swap_pager_free);
 	TAILQ_REMOVE(&swap_pager_free, spc, spc_list);

 	kva = spc->spc_kva;
@@ -1482,7 +1482,7 @@ swap_pager_sync()
 	register int s;

 	tspc = NULL;
-	if (swap_pager_done.tqh_first == NULL)
+	if (TAILQ_FIRST(&swap_pager_done) == NULL)
 		return;
 	for (;;) {
 		s = splbio();
@@ -1490,7 +1490,7 @@ swap_pager_sync()
 		 * Look up and removal from done list must be done at splbio()
 		 * to avoid conflicts with swap_pager_iodone.
 		 */
-		while ((spc = swap_pager_done.tqh_first) != 0) {
+		while ((spc = TAILQ_FIRST(&swap_pager_done)) != 0) {
 			pmap_qremove(spc->spc_kva, spc->spc_count);
 			swap_pager_finish(spc);
 			TAILQ_REMOVE(&swap_pager_done, spc, spc_list);
@@ -1609,7 +1609,7 @@ swap_pager_iodone(bp)
 		wakeup(spc->spc_object);
 	}
 	if ((swap_pager_needflags & SWAP_FREE_NEEDED) ||
-	    swap_pager_inuse.tqh_first == 0) {
+	    TAILQ_FIRST(&swap_pager_inuse) == 0) {
 		swap_pager_needflags &= ~SWAP_FREE_NEEDED;
 		wakeup(&swap_pager_free);
 	}
@@ -1623,7 +1623,7 @@ swap_pager_iodone(bp)
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
-	if ((swap_pager_inuse.tqh_first == NULL) ||
+	if ((TAILQ_FIRST(&swap_pager_inuse) == NULL) ||
 	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min &&
 	    nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) {
 		pagedaemon_wakeup();
sys/vm/vm_fault.c:

@@ -66,7 +66,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_fault.c,v 1.42 1996/03/09 06:48:26 dyson Exp $
+ * $Id: vm_fault.c,v 1.43 1996/03/28 04:53:23 dyson Exp $
  */

 /*
@@ -269,8 +269,8 @@ RetryFault:;
 			int s;

 			UNLOCK_THINGS;
-			s = splhigh();
-			if ((m->flags & PG_BUSY) || m->busy) {
+			s = splvm();
+			if (((m->flags & PG_BUSY) || m->busy)) {
 				m->flags |= PG_WANTED | PG_REFERENCED;
 				cnt.v_intrans++;
 				tsleep(m, PSWP, "vmpfw", 0);
@@ -311,7 +311,7 @@ RetryFault:;
 			 * Allocate a new page for this object/offset pair.
 			 */
 			m = vm_page_alloc(object, pindex,
-			    vp?VM_ALLOC_NORMAL:VM_ALLOC_ZERO);
+			    (vp || object->backing_object)?VM_ALLOC_NORMAL:VM_ALLOC_ZERO);

 			if (m == NULL) {
 				UNLOCK_AND_DEALLOCATE;
@@ -551,9 +551,9 @@ readrest:
 			vm_pindex_t other_pindex, other_pindex_offset;
 			vm_page_t tm;

-			other_object = object->shadow_head.tqh_first;
+			other_object = TAILQ_FIRST(&object->shadow_head);
 			if (other_object == first_object)
-				other_object = other_object->shadow_list.tqe_next;
+				other_object = TAILQ_NEXT(other_object, shadow_list);
 			if (!other_object)
 				panic("vm_fault: other object missing");
 			if (other_object &&
@@ -712,7 +712,7 @@ readrest:
 	m->valid = VM_PAGE_BITS_ALL;

 	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
-	if (vp && (change_wiring == 0) && (wired == 0))
+	if ((change_wiring == 0) && (wired == 0))
 		pmap_prefault(map->pmap, vaddr, entry, first_object);

 	/*
@@ -780,8 +780,9 @@ vm_fault_wire(map, start, end)
 	for (va = start; va < end; va += PAGE_SIZE) {

 		while( curproc != pageproc &&
-			(cnt.v_free_count <= cnt.v_pageout_free_min))
+			(cnt.v_free_count <= cnt.v_pageout_free_min)) {
 			VM_WAIT;
+		}

 		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
 		if (rv) {
@@ -817,11 +818,10 @@ vm_fault_unwire(map, start, end)

 	for (va = start; va < end; va += PAGE_SIZE) {
 		pa = pmap_extract(pmap, va);
-		if (pa == (vm_offset_t) 0) {
-			panic("unwire: page not in pmap");
-		}
-		pmap_change_wiring(pmap, va, FALSE);
-		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
+		if (pa != (vm_offset_t) 0) {
+			pmap_change_wiring(pmap, va, FALSE);
+			vm_page_unwire(PHYS_TO_VM_PAGE(pa));
+		}
 	}

 /*
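
The vm_page_alloc() change in the hunks above is the commit message's
"minor mod to vm_fault": a page about to be filled from a vnode, or
copied onto from a backing object, gains nothing from being pre-zeroed.
A hypothetical restatement of that decision — fault_alloc_class is not a
function in the tree, just an illustration:

	static int
	fault_alloc_class(struct vnode *vp, vm_object_t object)
	{
		/* A vnode read or backing-object COW overwrites the page. */
		if (vp != NULL || object->backing_object != NULL)
			return (VM_ALLOC_NORMAL);
		/* True zero-fill fault: a pre-zeroed page saves a bzero. */
		return (VM_ALLOC_ZERO);
	}

This reserves the scarce pre-zeroed pages produced by vm_page_zero_idle()
for faults that actually deliver zero-filled memory to the process.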
sys/vm/vm_glue.c (104 lines changed):
@@ -59,7 +59,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
- * $Id: vm_glue.c,v 1.47 1996/04/09 04:36:58 dyson Exp $
+ * $Id: vm_glue.c,v 1.48 1996/05/02 09:34:51 phk Exp $
 */

 #include "opt_ddb.h"
@@ -196,16 +196,15 @@ vm_fork(p1, p2)
 	register struct proc *p1, *p2;
 {
 	register struct user *up;
 	vm_offset_t addr, ptaddr, ptpa;
 	int error, i;
 	vm_map_t map;
 	pmap_t pvp;
-	vm_page_t stkm;
+	vm_object_t upobj;

 	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
 		VM_WAIT;
 	}

+#if 0
 	/*
 	 * avoid copying any of the parent's pagetables or other per-process
 	 * objects that reside in the map by marking all of them
@@ -213,6 +212,7 @@ vm_fork(p1, p2)
 	 */
 	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
 	    UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
+#endif
 	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

 	if (p1->p_vmspace->vm_shm)
@@ -223,61 +223,26 @@ vm_fork(p1, p2)
 	 * process
 	 */

 	addr = (vm_offset_t) kstack;

 	map = &p2->p_vmspace->vm_map;
 	pvp = &p2->p_vmspace->vm_pmap;

 	/*
 	 * allocate object for the upages
 	 */
-	p2->p_vmspace->vm_upages_obj = vm_object_allocate( OBJT_DEFAULT,
+	p2->p_vmspace->vm_upages_obj = upobj = vm_object_allocate( OBJT_DEFAULT,
 	    UPAGES);

-	/*
-	 * put upages into the address space
-	 */
-	error = vm_map_find(map, p2->p_vmspace->vm_upages_obj, 0,
-	    &addr, UPT_MIN_ADDRESS - addr, FALSE, VM_PROT_ALL,
-	    VM_PROT_ALL, 0);
-	if (error != KERN_SUCCESS)
-		panic("vm_fork: vm_map_find (UPAGES) failed, addr=0x%x, error=%d", addr, error);
-
-	addr += UPAGES * PAGE_SIZE;
-	/* allocate space for page tables */
-	error = vm_map_find(map, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE,
-	    VM_PROT_ALL, VM_PROT_ALL, 0);
-	if (error != KERN_SUCCESS)
-		panic("vm_fork: vm_map_find (PTES) failed, addr=0x%x, error=%d", addr, error);
-
 	/* get a kernel virtual address for the UPAGES for this proc */
 	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
 	if (up == NULL)
 		panic("vm_fork: u_map allocation failed");

-	/*
-	 * create a pagetable page for the UPAGES in the process address space
-	 */
-	ptaddr = trunc_page((u_int) vtopte(kstack));
-	(void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
-	ptpa = pmap_extract(pvp, ptaddr);
-	if (ptpa == 0) {
-		panic("vm_fork: no pte for UPAGES");
-	}
-
-	/*
-	 * hold the page table page for the kernel stack, and fault them in
-	 */
-	stkm = PHYS_TO_VM_PAGE(ptpa);
-	vm_page_hold(stkm);
-
 	for(i=0;i<UPAGES;i++) {
 		vm_page_t m;

 		/*
 		 * Get a kernel stack page
 		 */
-		while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj,
+		while ((m = vm_page_alloc(upobj,
 		    i, VM_ALLOC_NORMAL)) == NULL) {
 			VM_WAIT;
 		}
@@ -286,24 +251,20 @@ vm_fork(p1, p2)
 		 * Wire the page
 		 */
 		vm_page_wire(m);
-		m->flags &= ~PG_BUSY;
+		PAGE_WAKEUP(m);

 		/*
 		 * Enter the page into both the kernel and the process
 		 * address space.
 		 */
 		pmap_enter( pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
-		    VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, 1);
+		    VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
 		pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
 		    VM_PAGE_TO_PHYS(m));
+		m->flags &= ~PG_ZERO;
 		m->flags |= PG_MAPPED;
 		m->valid = VM_PAGE_BITS_ALL;
 	}
-	/*
-	 * The page table page for the kernel stack should be held in memory
-	 * now.
-	 */
-	vm_page_unhold(stkm);

 	p2->p_addr = up;

@@ -371,33 +332,22 @@ faultin(p)
 	int s;

 	if ((p->p_flag & P_INMEM) == 0) {
-		vm_map_t map = &p->p_vmspace->vm_map;
 		pmap_t pmap = &p->p_vmspace->vm_pmap;
 		vm_page_t stkm, m;
-		vm_offset_t ptpa;
-		int error;
+		vm_object_t upobj = p->p_vmspace->vm_upages_obj;

 		++p->p_lock;
 #if defined(SWAP_DEBUG)
 		printf("swapping in %d\n", p->p_pid);
 #endif

-		ptaddr = trunc_page((u_int) vtopte(kstack));
-		(void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
-		ptpa = pmap_extract(&p->p_vmspace->vm_pmap, ptaddr);
-		if (ptpa == 0) {
-			panic("vm_fork: no pte for UPAGES");
-		}
-		stkm = PHYS_TO_VM_PAGE(ptpa);
-		vm_page_hold(stkm);
-
 		for(i=0;i<UPAGES;i++) {
 			int s;
-			s = splhigh();
-
+			s = splvm();
retry:
-			if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL) {
-				if ((m = vm_page_alloc(p->p_vmspace->vm_upages_obj, i, VM_ALLOC_NORMAL)) == NULL) {
+			if ((m = vm_page_lookup(upobj, i)) == NULL) {
+				if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
 					VM_WAIT;
 					goto retry;
 				}
@@ -407,10 +357,9 @@ retry:
 				tsleep(m, PVM, "swinuw",0);
 				goto retry;
 			}
+			m->flags |= PG_BUSY;
 		}
 		vm_page_wire(m);
-		if (m->valid == VM_PAGE_BITS_ALL)
-			m->flags &= ~PG_BUSY;
 		splx(s);

 		pmap_enter( pmap, (vm_offset_t) kstack + i * PAGE_SIZE,
@@ -419,16 +368,15 @@ retry:
 			VM_PAGE_TO_PHYS(m));
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			int rv;
-			rv = vm_pager_get_pages(p->p_vmspace->vm_upages_obj,
+			rv = vm_pager_get_pages(upobj,
 			    &m, 1, 0);
 			if (rv != VM_PAGER_OK)
 				panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
 			m->valid = VM_PAGE_BITS_ALL;
+			m->flags &= ~PG_BUSY;
 		}
 		PAGE_WAKEUP(m);
 		m->flags |= PG_MAPPED;
 		}
-		vm_page_unhold(stkm);

 		s = splhigh();

@@ -527,8 +475,12 @@ swapout_procs()
 	outpri = outpri2 = INT_MIN;
retry:
 	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
+		struct vmspace *vm;
 		if (!swappable(p))
 			continue;
+
+		vm = p->p_vmspace;
+
 		switch (p->p_stat) {
 		default:
 			continue;
@@ -549,22 +501,25 @@ retry:
 		    (p->p_slptime <= 4))
 			continue;

-		vm_map_reference(&p->p_vmspace->vm_map);
+		++vm->vm_refcnt;
+		vm_map_reference(&vm->vm_map);
 		/*
 		 * do not swapout a process that is waiting for VM
 		 * datastructures there is a possible deadlock.
 		 */
-		if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
-			vm_map_deallocate(&p->p_vmspace->vm_map);
+		if (!lock_try_write(&vm->vm_map.lock)) {
+			vm_map_deallocate(&vm->vm_map);
+			vmspace_free(vm);
 			continue;
 		}
-		vm_map_unlock(&p->p_vmspace->vm_map);
+		vm_map_unlock(&vm->vm_map);
 		/*
 		 * If the process has been asleep for awhile and had
 		 * most of its pages taken away already, swap it out.
 		 */
 		swapout(p);
-		vm_map_deallocate(&p->p_vmspace->vm_map);
+		vm_map_deallocate(&vm->vm_map);
+		vmspace_free(vm);
 		didswap++;
 		goto retry;
 	}
@@ -612,6 +567,7 @@ swapout(p)
 			panic("swapout: upage already missing???");
 		m->dirty = VM_PAGE_BITS_ALL;
 		vm_page_unwire(m);
+		vm_page_deactivate(m);
 		pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
 	}
 	pmap_remove(pmap, (vm_offset_t) kstack,
sys/vm/vm_kern.c:

@@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
- * $Id: vm_kern.c,v 1.23 1996/04/24 04:16:44 dyson Exp $
+ * $Id: vm_kern.c,v 1.24 1996/05/10 19:28:54 wollman Exp $
 */

 /*
@@ -100,6 +100,7 @@ vm_map_t io_map;
 vm_map_t clean_map;
 vm_map_t phys_map;
 vm_map_t exec_map;
+vm_map_t exech_map;
 vm_map_t u_map;

 /*
@@ -327,22 +328,8 @@ kmem_malloc(map, size, waitflag)
 	vm_map_insert(map, kmem_object, offset, addr, addr + size,
 	    VM_PROT_ALL, VM_PROT_ALL, 0);

-	/*
-	 * If we can wait, just mark the range as wired (will fault pages as
-	 * necessary).
-	 */
-	if (waitflag == M_WAITOK) {
-		vm_map_unlock(map);
-		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
-		    FALSE);
-		vm_map_simplify(map, addr);
-		return (addr);
-	}
-	/*
-	 * If we cannot wait then we must allocate all memory up front,
-	 * pulling it off the active queue to prevent pageout.
-	 */
 	for (i = 0; i < size; i += PAGE_SIZE) {
+retry:
 		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
 		    (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

@@ -352,6 +339,10 @@ kmem_malloc(map, size, waitflag)
 		 * aren't on any queues.
 		 */
 		if (m == NULL) {
+			if (waitflag == M_WAITOK) {
+				VM_WAIT;
+				goto retry;
+			}
 			while (i != 0) {
 				i -= PAGE_SIZE;
 				m = vm_page_lookup(kmem_object,
@@ -362,7 +353,7 @@ kmem_malloc(map, size, waitflag)
 			vm_map_unlock(map);
 			return (0);
 		}
-		m->flags &= ~(PG_BUSY|PG_ZERO);
+		m->flags &= ~PG_ZERO;
 		m->valid = VM_PAGE_BITS_ALL;
 	}

@@ -386,7 +377,9 @@ kmem_malloc(map, size, waitflag)
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		vm_page_wire(m);
-		pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
+		PAGE_WAKEUP(m);
+		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
+		    VM_PROT_ALL, 1);
 	}
 	vm_map_unlock(map);
sys/vm/vm_map.c (243 lines changed):
@@ -61,7 +61,7 @@
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
- * $Id: vm_map.c,v 1.43 1996/04/29 22:04:57 dyson Exp $
+ * $Id: vm_map.c,v 1.44 1996/05/03 21:01:49 phk Exp $
 */

 /*
@@ -157,11 +157,15 @@ static int kentry_count;
 static vm_offset_t mapvm_start, mapvm, mapvmmax;
 static int mapvmpgcnt;

+static struct vm_map_entry *mappool;
+static int mappoolcnt;
+#define KENTRY_LOW_WATER 128
+
 static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
 static vm_map_entry_t vm_map_entry_create __P((vm_map_t));
 static void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
-static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
+static __inline void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
 static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
 static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
 		vm_map_entry_t));
@@ -214,11 +218,10 @@ vmspace_alloc(min, max, pageable)
 	if (mapvmpgcnt == 0 && mapvm == 0) {
 		int s;

-		mapvmpgcnt = btoc(cnt.v_page_count * sizeof(struct vm_map_entry));
-		s = splhigh();
-		mapvm_start = mapvm = kmem_alloc_pageable(kernel_map, mapvmpgcnt * PAGE_SIZE);
+		mapvmpgcnt = (cnt.v_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+		mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
+		    mapvmpgcnt * PAGE_SIZE);
 		mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
-		splx(s);
 		if (!mapvm)
 			mapvmpgcnt = 0;
 	}
@@ -241,7 +244,6 @@ vmspace_free(vm)
 		panic("vmspace_free: attempt to free already freed vmspace");

 	if (--vm->vm_refcnt == 0) {
-		int s, i;

 		/*
 		 * Lock the map, to wait out all other references to it.
@@ -252,11 +254,17 @@ vmspace_free(vm)
 		(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
 		    vm->vm_map.max_offset);
 		vm_map_unlock(&vm->vm_map);
+		while( vm->vm_map.ref_count != 1)
+			tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
+		--vm->vm_map.ref_count;
+		vm_object_pmap_remove(vm->vm_upages_obj,
+		    0, vm->vm_upages_obj->size);
+		vm_object_deallocate(vm->vm_upages_obj);
 		pmap_release(&vm->vm_pmap);
 		FREE(vm, M_VMMAP);
-	}
+	} else {
+		wakeup(&vm->vm_map.ref_count);
+	}
 }

@@ -314,45 +322,66 @@ vm_map_init(map, min, max, pageable)
 	lock_init(&map->lock, TRUE);
 }

+/*
+ * vm_map_entry_dispose:	[ internal use only ]
+ *
+ * Inverse of vm_map_entry_create.
+ */
+static __inline void
+vm_map_entry_dispose(map, entry)
+	vm_map_t map;
+	vm_map_entry_t entry;
+{
+	int s;
+
+	if (kentry_count < KENTRY_LOW_WATER) {
+		s = splvm();
+		entry->next = kentry_free;
+		kentry_free = entry;
+		++kentry_count;
+		splx(s);
+	} else {
+		entry->next = mappool;
+		mappool = entry;
+		++mappoolcnt;
+	}
+}
+
 /*
  * vm_map_entry_create:	[ internal use only ]
  *
  * Allocates a VM map entry for insertion.
  * No entry fields are filled in. This routine is
  */
-static struct vm_map_entry *mappool;
-static int mappoolcnt;
-
 static vm_map_entry_t
 vm_map_entry_create(map)
 	vm_map_t map;
 {
 	vm_map_entry_t entry;
-	int i;
-
-#define KENTRY_LOW_WATER 64
-#define MAPENTRY_LOW_WATER 128
+	int s;

 	/*
 	 * This is a *very* nasty (and sort of incomplete) hack!!!!
 	 */
 	if (kentry_count < KENTRY_LOW_WATER) {
+		s = splvm();
 		if (mapvmpgcnt && mapvm) {
 			vm_page_t m;

 			m = vm_page_alloc(kernel_object,
-			    OFF_TO_IDX(mapvm - vm_map_min(kernel_map)),
+			    OFF_TO_IDX(mapvm - VM_MIN_KERNEL_ADDRESS),
 			    (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);

 			if (m) {
 				int newentries;

 				newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
 				vm_page_wire(m);
-				m->flags &= ~PG_BUSY;
+				PAGE_WAKEUP(m);
 				m->valid = VM_PAGE_BITS_ALL;
-				pmap_enter(vm_map_pmap(kmem_map), mapvm,
-				    VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT, 1);
-				m->flags |= PG_WRITEABLE|PG_MAPPED;
+				pmap_kenter(mapvm, VM_PAGE_TO_PHYS(m));
+				m->flags |= PG_WRITEABLE;

 				entry = (vm_map_entry_t) mapvm;
 				mapvm += PAGE_SIZE;
@@ -364,65 +393,33 @@ vm_map_entry_create(map)
 			}
 		}
+		splx(s);
 	}

 	if (map == kernel_map || map == kmem_map || map == pager_map) {
+		s = splvm();
 		entry = kentry_free;
 		if (entry) {
 			kentry_free = entry->next;
 			--kentry_count;
-			return entry;
-		}
-		entry = mappool;
-		if (entry) {
-			mappool = entry->next;
-			--mappoolcnt;
-			return entry;
 		} else {
 			panic("vm_map_entry_create: out of map entries for kernel");
 		}
+		splx(s);
 	} else {
 		entry = mappool;
 		if (entry) {
 			mappool = entry->next;
 			--mappoolcnt;
-			return entry;
 		} else {
 			MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
 			    M_VMMAPENT, M_WAITOK);
 		}
 	}
 	if (entry == NULL)
 		panic("vm_map_entry_create: out of map entries");

 	return (entry);
 }

-/*
- * vm_map_entry_dispose:	[ internal use only ]
- *
- * Inverse of vm_map_entry_create.
- */
-static void
-vm_map_entry_dispose(map, entry)
-	vm_map_t map;
-	vm_map_entry_t entry;
-{
-	if ((kentry_count < KENTRY_LOW_WATER) ||
-	    ((vm_offset_t) entry >= kentry_data && (vm_offset_t) entry < (kentry_data + kentry_data_size)) ||
-	    ((vm_offset_t) entry >= mapvm_start && (vm_offset_t) entry < mapvmmax)) {
-		entry->next = kentry_free;
-		kentry_free = entry;
-		++kentry_count;
-		return;
-	} else {
-		if (mappoolcnt < MAPENTRY_LOW_WATER) {
-			entry->next = mappool;
-			mappool = entry;
-			++mappoolcnt;
-			return;
-		}
-		FREE(entry, M_VMMAPENT);
-	}
-}

 /*
  * vm_map_entry_{un,}link:
 *
@@ -637,9 +634,9 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)

 	if ((prev_entry != &map->header) &&
 	    (prev_entry->end == start) &&
-	    ((object == NULL) || (prev_entry->object.vm_object == object)) &&
 	    (prev_entry->is_a_map == FALSE) &&
 	    (prev_entry->is_sub_map == FALSE) &&
+	    ((object == NULL) || (prev_entry->object.vm_object == object)) &&
 	    (prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
 	    (prev_entry->protection == prot) &&
 	    (prev_entry->max_protection == max) &&
@@ -664,13 +661,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 			prev_entry->end = end;
 			return (KERN_SUCCESS);
 		}
-	} /* else if ((object == prev_entry->object.vm_object) &&
-	    (prev_entry->offset + (prev_entry->end - prev_entry->start) == offset)) {
-		map->size += (end - prev_entry->end);
-		prev_entry->end = end;
-		printf("map optim 1\n");
-		return (KERN_SUCCESS);
-	} */
+	}
 	}
 	/*
 	 * Create a new entry
@@ -711,7 +702,6 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 	/*
 	 * Update the free space hint
 	 */
-
 	if ((map->first_free == prev_entry) &&
 	    (prev_entry->end >= new_entry->start))
 		map->first_free = new_entry;
@@ -803,7 +793,7 @@ vm_map_find(map, object, offset, addr, length, find_space, prot, max, cow)
 	start = *addr;

 	if (map == kmem_map)
-		s = splhigh();
+		s = splvm();

 	vm_map_lock(map);
 	if (find_space) {
@@ -866,10 +856,13 @@ vm_map_simplify_entry(map, entry)
 	    (prev->wired_count == 0)) {
 		if (map->first_free == prev)
 			map->first_free = entry;
+		if (map->hint == prev)
+			map->hint = entry;
 		vm_map_entry_unlink(map, prev);
 		entry->start = prev->start;
 		entry->offset = prev->offset;
-		vm_object_deallocate(prev->object.vm_object);
+		if (prev->object.vm_object)
+			vm_object_deallocate(prev->object.vm_object);
 		vm_map_entry_dispose(map, prev);
 	}
 }
@@ -891,9 +884,12 @@ vm_map_simplify_entry(map, entry)
 	    (next->wired_count == 0)) {
 		if (map->first_free == next)
 			map->first_free = entry;
+		if (map->hint == next)
+			map->hint = entry;
 		vm_map_entry_unlink(map, next);
 		entry->end = next->end;
-		vm_object_deallocate(next->object.vm_object);
+		if (next->object.vm_object)
+			vm_object_deallocate(next->object.vm_object);
 		vm_map_entry_dispose(map, next);
 	}
 }
@@ -1131,7 +1127,6 @@ vm_map_protect(map, start, end, new_prot, set_max)
 	 */

 	if (current->protection != old_prot) {
-
 #define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
 							VM_PROT_ALL)
 #define max(a,b)	((a) > (b) ? (a) : (b))
@@ -1585,7 +1580,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
 * The map in question should be locked.
 * [This is the reason for this routine's existence.]
 */
-static void
+static __inline void
 vm_map_entry_unwire(map, entry)
 	vm_map_t map;
 	register vm_map_entry_t entry;
@@ -1599,7 +1594,7 @@ vm_map_entry_unwire(map, entry)
 *
 * Deallocate the given entry from the target map.
 */
-static void
+static __inline void
 vm_map_entry_delete(map, entry)
 	register vm_map_t map;
 	register vm_map_entry_t entry;
@@ -1658,7 +1653,9 @@ vm_map_delete(map, start, end)
 	/*
 	 * Save the free space hint
 	 */
-	if (map->first_free->start >= start)
+	if (entry == &map->header) {
+		map->first_free = &map->header;
+	} else if (map->first_free->start >= start)
 		map->first_free = entry->prev;

 	/*
@@ -1667,14 +1664,16 @@ vm_map_delete(map, start, end)

 	while ((entry != &map->header) && (entry->start < end)) {
 		vm_map_entry_t next;
-		register vm_offset_t s, e;
-		register vm_object_t object;
+		vm_offset_t s, e;
+		vm_object_t object;
+		vm_ooffset_t offset;

 		vm_map_clip_end(map, entry, end);

 		next = entry->next;
 		s = entry->start;
 		e = entry->end;
+		offset = entry->offset;

 		/*
 		 * Unwire before removing addresses from the pmap; otherwise,
@@ -1691,15 +1690,16 @@ vm_map_delete(map, start, end)
 		 * which are sharing it.
 		 */

-		if (object == kernel_object || object == kmem_object)
-			vm_object_page_remove(object, OFF_TO_IDX(entry->offset),
-			    OFF_TO_IDX(entry->offset + (e - s)), FALSE);
-		else if (!map->is_main_map)
+		if (object == kernel_object || object == kmem_object) {
+			vm_object_page_remove(object, OFF_TO_IDX(offset),
+			    OFF_TO_IDX(offset + (e - s)), FALSE);
+		} else if (!map->is_main_map) {
 			vm_object_pmap_remove(object,
-			    OFF_TO_IDX(entry->offset),
-			    OFF_TO_IDX(entry->offset + (e - s)));
-		else
+			    OFF_TO_IDX(offset),
+			    OFF_TO_IDX(offset + (e - s)));
+		} else {
 			pmap_remove(map->pmap, s, e);
+		}

 		/*
 		 * Delete the entry (which may delete the object) only after
@@ -1729,7 +1729,7 @@ vm_map_remove(map, start, end)
 	register int result, s = 0;

 	if (map == kmem_map)
-		s = splhigh();
+		s = splvm();

 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
@@ -1806,16 +1806,6 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
 	if (src_entry->is_sub_map || dst_entry->is_sub_map)
 		return;

-	if (dst_entry->object.vm_object != NULL)
-		printf("vm_map_copy_entry: dst_entry object not NULL!\n");
-
-	/*
-	 * If our destination map was wired down, unwire it now.
-	 */
-
-	if (dst_entry->wired_count != 0)
-		vm_map_entry_unwire(dst_map, dst_entry);
-
 	if (src_entry->wired_count == 0) {

 		boolean_t src_needs_copy;
@@ -1847,35 +1837,28 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
 			    - src_entry->start)));
 		}

-		/*
-		 * Make a copy of the object.
-		 */
-		temp_pindex = OFF_TO_IDX(dst_entry->offset);
-		vm_object_copy(src_entry->object.vm_object,
-		    OFF_TO_IDX(src_entry->offset),
-		    &dst_entry->object.vm_object,
-		    &temp_pindex,
-		    &src_needs_copy);
-		dst_entry->offset = IDX_TO_OFF(temp_pindex);
-		/*
-		 * If we didn't get a copy-object now, mark the source map
-		 * entry so that a shadow will be created to hold its changed
-		 * pages.
-		 */
-		if (src_needs_copy)
+		if (src_entry->object.vm_object) {
+			if ((src_entry->object.vm_object->handle == NULL) &&
+			    (src_entry->object.vm_object->type == OBJT_DEFAULT ||
+			     src_entry->object.vm_object->type == OBJT_SWAP))
+				vm_object_collapse(src_entry->object.vm_object);
+			++src_entry->object.vm_object->ref_count;
 			src_entry->copy_on_write = TRUE;
-
-		/*
-		 * The destination always needs to have a shadow created.
-		 */
-		dst_entry->needs_copy = TRUE;
-
-		/*
-		 * Mark the entries copy-on-write, so that write-enabling the
-		 * entry won't make copy-on-write pages writable.
-		 */
-		src_entry->copy_on_write = TRUE;
-		dst_entry->copy_on_write = TRUE;
+			src_entry->needs_copy = TRUE;
+			dst_entry->needs_copy = TRUE;
+			dst_entry->copy_on_write = TRUE;
+			dst_entry->object.vm_object =
+			    src_entry->object.vm_object;
+			dst_entry->offset = src_entry->offset;
+		} else {
+			dst_entry->object.vm_object = NULL;
+			dst_entry->offset = 0;
+		}

 		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
 		    dst_entry->end - dst_entry->start, src_entry->start);
@@ -1962,7 +1945,6 @@ vmspace_fork(vm1)
 		/*
 		 * Clone the entry and link into the map.
 		 */
-
 		new_entry = vm_map_entry_create(new_map);
 		*new_entry = *old_entry;
 		new_entry->wired_count = 0;
@@ -2251,11 +2233,13 @@ vm_map_simplify(map, start)
 	vm_map_entry_t prev_entry;

 	vm_map_lock(map);
-	if (
-	    (vm_map_lookup_entry(map, start, &this_entry)) &&
+	if ((vm_map_lookup_entry(map, start, &this_entry)) &&
 	    ((prev_entry = this_entry->prev) != &map->header) &&

 	    (prev_entry->end == start) &&
+	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
+	    ((prev_entry->offset + (prev_entry->end - prev_entry->start))
+		== this_entry->offset) &&

 	    (map->is_main_map) &&

 	    (prev_entry->is_a_map == FALSE) &&
@@ -2270,18 +2254,15 @@ vm_map_simplify(map, start)
 	    (prev_entry->wired_count == this_entry->wired_count) &&

 	    (prev_entry->copy_on_write == this_entry->copy_on_write) &&
-	    (prev_entry->needs_copy == this_entry->needs_copy) &&
-
-	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
-	    ((prev_entry->offset + (prev_entry->end - prev_entry->start))
-		== this_entry->offset)
-	    ) {
+	    (prev_entry->needs_copy == this_entry->needs_copy)) {
 		if (map->first_free == this_entry)
 			map->first_free = prev_entry;
-		SAVE_HINT(map, prev_entry);
+		if (map->hint == this_entry)
+			SAVE_HINT(map, prev_entry);
 		vm_map_entry_unlink(map, this_entry);
 		prev_entry->end = this_entry->end;
-		vm_object_deallocate(this_entry->object.vm_object);
+		if (this_entry->object.vm_object)
+			vm_object_deallocate(this_entry->object.vm_object);
 		vm_map_entry_dispose(map, this_entry);
 	}
 	vm_map_unlock(map);
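
The vm_map_entry_create()/vm_map_entry_dispose() rework above is the
"improved and simplified allocation of map entries" item: kernel maps
pull from a reserved, spl-protected free list, while other maps recycle
entries through a small pool before falling back to MALLOC.  A userland
sketch of that two-tier pattern (illustrative names, not kernel code):

	#include <stdlib.h>

	struct entry {
		struct entry *next;		/* free-list linkage */
		/* ... payload ... */
	};

	static struct entry *pool;		/* cf. mappool */
	static int poolcnt;
	#define POOL_HIGH_WATER 128		/* cf. MAPENTRY_LOW_WATER */

	static struct entry *
	entry_create(void)
	{
		struct entry *e = pool;

		if (e != NULL) {		/* reuse a recycled entry */
			pool = e->next;
			--poolcnt;
			return (e);
		}
		return (malloc(sizeof(*e)));
	}

	static void
	entry_dispose(struct entry *e)
	{
		if (poolcnt < POOL_HIGH_WATER) {	/* keep a bounded cache */
			e->next = pool;
			pool = e;
			++poolcnt;
		} else
			free(e);
	}

The bounded pool keeps the common create/dispose cycle allocation-free
without letting freed entries accumulate without limit.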
sys/vm/vm_meter.c:

@@ -31,7 +31,7 @@
 * SUCH DAMAGE.
 *
 *	@(#)vm_meter.c	8.4 (Berkeley) 1/4/94
- * $Id: vm_meter.c,v 1.13 1995/12/14 09:55:02 phk Exp $
+ * $Id: vm_meter.c,v 1.14 1996/03/11 06:11:40 hsu Exp $
 */

 #include <sys/param.h>
@@ -136,9 +136,9 @@ vmtotal SYSCTL_HANDLER_ARGS
 	/*
 	 * Mark all objects as inactive.
 	 */
-	for (object = vm_object_list.tqh_first;
+	for (object = TAILQ_FIRST(&vm_object_list);
 	    object != NULL;
-	    object = object->object_list.tqe_next)
+	    object = TAILQ_NEXT(object,object_list))
 		object->flags &= ~OBJ_ACTIVE;
 	/*
 	 * Calculate process statistics.
@@ -191,9 +191,9 @@ vmtotal SYSCTL_HANDLER_ARGS
 	/*
 	 * Calculate object memory usage statistics.
 	 */
-	for (object = vm_object_list.tqh_first;
+	for (object = TAILQ_FIRST(&vm_object_list);
 	    object != NULL;
-	    object = object->object_list.tqe_next) {
+	    object = TAILQ_NEXT(object, object_list)) {
 		totalp->t_vm += num_pages(object->size);
 		totalp->t_rm += object->resident_page_count;
 		if (object->flags & OBJ_ACTIVE) {
sys/vm/vm_mmap.c:

@@ -38,7 +38,7 @@
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.40 1996/03/16 15:00:05 davidg Exp $
+ * $Id: vm_mmap.c,v 1.41 1996/05/03 21:01:51 phk Exp $
 */

 /*
@@ -802,8 +802,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
 	/*
 	 * "Pre-fault" resident pages.
 	 */
-	if ((map != kernel_map) &&
-	    (type == OBJT_VNODE) && (map->pmap != NULL)) {
+	if ((type == OBJT_VNODE) && (map->pmap != NULL)) {
 		pmap_object_init_pt(map->pmap, *addr,
 		    object, (vm_pindex_t) OFF_TO_IDX(foff), size);
 	}
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.67 1996/03/29 06:28:48 davidg Exp $
* $Id: vm_object.c,v 1.68 1996/04/24 04:16:45 dyson Exp $
*/

/*
@ -278,7 +278,7 @@ vm_object_deallocate(object)
(object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP)) {
vm_object_t robject;
robject = object->shadow_head.tqh_first;
robject = TAILQ_FIRST(&object->shadow_head);
if ((robject != NULL) &&
(robject->handle == NULL) &&
(robject->type == OBJT_DEFAULT ||
@ -288,7 +288,7 @@ vm_object_deallocate(object)
object->ref_count += 2;

do {
s = splhigh();
s = splvm();
while (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objde1", 0);
@ -375,7 +375,7 @@ vm_object_terminate(object)
/*
* wait for the pageout daemon to be done with the object
*/
s = splhigh();
s = splvm();
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
tsleep(object, PVM, "objtrm", 0);
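Both waits above show the new splvm() discipline: splvm() (introduced by
this commit as a combination of splbio and splimp) replaces splhigh()
around the sleep, a wanted flag tells whoever finishes the pageout that
a wakeup is needed, and the loop re-tests the condition after every
wakeup.  A schematic of the pattern in kernel idiom -- the structure and
flag names here are invented, not the real vm_object fields:

    /* Wait until no pageout is in flight on obj. */
    static void
    wait_for_paging(struct myobj *obj)
    {
            int s;

            s = splvm();                    /* mask VM interrupts */
            while (obj->paging_in_progress) {
                    obj->flags |= MYOBJ_WANTED;  /* request a wakeup */
                    tsleep(obj, PVM, "objwt", 0);
            }
            splx(s);                        /* restore previous level */
    }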
@ -402,9 +402,10 @@ vm_object_terminate(object)
* Now free the pages. For internal objects, this also removes them
* from paging queues.
*/
while ((p = object->memq.tqh_first) != NULL) {
while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
if (p->flags & PG_BUSY)
printf("vm_object_terminate: freeing busy page\n");
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
cnt.v_pfree++;
@ -478,12 +479,12 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
if ((tstart == 0) && (tend == object->size)) {
object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
for(p = object->memq.tqh_first; p; p = p->listq.tqe_next)
for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
p->flags |= PG_CLEANCHK;

rescan:
for(p = object->memq.tqh_first; p; p = np) {
np = p->listq.tqe_next;
for(p = TAILQ_FIRST(&object->memq); p; p = np) {
np = TAILQ_NEXT(p, listq);

pi = p->pindex;
if (((p->flags & PG_CLEANCHK) == 0) ||
@ -499,7 +500,7 @@ rescan:
continue;
}

s = splhigh();
s = splvm();
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED|PG_REFERENCED;
tsleep(p, PVM, "vpcwai", 0);
@ -597,8 +598,8 @@ vm_object_deactivate_pages(object)
{
register vm_page_t p, next;

for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
next = TAILQ_NEXT(p, listq);
vm_page_deactivate(p);
}
}
@ -613,7 +614,7 @@ vm_object_cache_trim()
register vm_object_t object;

while (vm_object_cached > vm_object_cache_max) {
object = vm_object_cached_list.tqh_first;
object = TAILQ_FIRST(&vm_object_cached_list);

vm_object_reference(object);
pager_cache(object, FALSE);
@ -641,7 +642,7 @@ vm_object_pmap_copy(object, start, end)
if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;

for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
vm_page_protect(p, VM_PROT_READ);
}

@ -665,7 +666,7 @@ vm_object_pmap_remove(object, start, end)
register vm_page_t p;
if (object == NULL)
return;
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
if (p->pindex >= start && p->pindex < end)
vm_page_protect(p, VM_PROT_NONE);
}
@ -808,17 +809,16 @@ vm_object_qcollapse(object)
backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
paging_offset_index = OFF_TO_IDX(object->paging_offset);
size = object->size;
p = backing_object->memq.tqh_first;
p = TAILQ_FIRST(&backing_object->memq);
while (p) {
vm_page_t next;

next = p->listq.tqe_next;
next = TAILQ_NEXT(p, listq);
if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
(p->queue == PQ_CACHE) || !p->valid || p->hold_count || p->wire_count || p->busy) {
p = next;
continue;
}
vm_page_protect(p, VM_PROT_NONE);
new_pindex = p->pindex - backing_offset_index;
if (p->pindex < backing_offset_index ||
new_pindex >= size) {
@ -826,6 +826,7 @@ vm_object_qcollapse(object)
swap_pager_freespace(backing_object,
backing_object_paging_offset_index+p->pindex,
1);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
} else {
pp = vm_page_lookup(object, new_pindex);
@ -834,6 +835,7 @@ vm_object_qcollapse(object)
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
backing_object_paging_offset_index + p->pindex, 1);
vm_page_protect(p, VM_PROT_NONE);
vm_page_free(p);
} else {
if (backing_object->type == OBJT_SWAP)
@ -930,7 +932,7 @@ vm_object_collapse(object)
* shadow them.
*/

while ((p = backing_object->memq.tqh_first) != 0) {
while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {

new_pindex = p->pindex - backing_offset_index;

@ -1071,7 +1073,7 @@ vm_object_collapse(object)
* here.
*/

for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
new_pindex = p->pindex - backing_offset_index;

/*
@ -1160,24 +1162,29 @@ vm_object_page_remove(object, start, end, clean_only)
again:
size = end - start;
if (size > 4 || size >= object->size / 4) {
for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
next = TAILQ_NEXT(p, listq);
if ((start <= p->pindex) && (p->pindex < end)) {

if (p->wire_count != 0) {
vm_page_protect(p, VM_PROT_NONE);
p->valid = 0;
continue;
}

s = splhigh();
/*
* The busy flags are only cleared at
* interrupt -- minimize the spl transitions
*/
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
s = splvm();
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
splx(s);
goto again;
}
splx(s);
goto again;
}
splx(s);

if (clean_only) {
vm_page_test_dirty(p);
@ -1199,14 +1206,20 @@ again:
size -= 1;
continue;
}
s = splhigh();
/*
* The busy flags are only cleared at
* interrupt -- minimize the spl transitions
*/
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
s = splvm();
if ((p->flags & PG_BUSY) || p->busy) {
p->flags |= PG_WANTED;
tsleep(p, PVM, "vmopar", 0);
splx(s);
goto again;
}
splx(s);
goto again;
}
splx(s);
if (clean_only) {
vm_page_test_dirty(p);
if (p->valid & p->dirty) {
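The page-removal loops above cannot simply skip a busy page: tsleep()
blocks, and while the thread sleeps the object's page list may change
underneath it, so after waking they restart the whole scan rather than
trust a stale list position.  The shape of that idiom, with invented
names standing in for the real object and page types:

    /* Sketch: visit every page, waiting out busy ones. */
    static void
    scan_pages(struct myobj *obj)
    {
            struct mypage *p, *next;
            int s;
    again:
            for (p = TAILQ_FIRST(&obj->memq); p != NULL; p = next) {
                    next = TAILQ_NEXT(p, listq);
                    s = splvm();
                    if (p->flags & MYPG_BUSY) {
                            p->flags |= MYPG_WANTED;
                            tsleep(p, PVM, "pgwait", 0);
                            splx(s);
                            goto again;     /* list may have changed */
                    }
                    splx(s);
                    /* ... operate on p ... */
            }
    }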
@ -1391,9 +1404,9 @@ DDB_vm_object_check()
* make sure that internal objs are in a map somewhere
* and none have zero ref counts.
*/
for (object = vm_object_list.tqh_first;
for (object = TAILQ_FIRST(&vm_object_list);
object != NULL;
object = object->object_list.tqe_next) {
object = TAILQ_NEXT(object, object_list)) {
if (object->handle == NULL &&
(object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
if (object->ref_count == 0) {
@ -1436,14 +1449,14 @@ vm_object_print(iobject, full, dummy3, dummy4)
(int) object->paging_offset,
(int) object->backing_object, (int) object->backing_object_offset);
printf("cache: next=%p, prev=%p\n",
object->cached_list.tqe_next, object->cached_list.tqe_prev);
TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));

if (!full)
return;

indent += 2;
count = 0;
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
if (count == 0)
iprintf("memory:=");
else if (count == 6) {
373 sys/vm/vm_page.c
@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.49 1996/03/09 06:56:39 dyson Exp $
* $Id: vm_page.c,v 1.50 1996/03/28 04:53:27 dyson Exp $
*/

/*
@ -140,7 +140,6 @@ static u_short vm_page_dev_bsize_chunks[] = {
static inline __pure int
vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
__pure2;
static void vm_page_unqueue __P((vm_page_t ));

/*
* vm_set_page_size:
@ -244,7 +243,7 @@ vm_page_startup(starta, enda, vaddr)
vm_page_buckets = (struct pglist *) vaddr;
bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 2;
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(total))
vm_page_bucket_count <<= 1;
}
@ -383,7 +382,7 @@ vm_page_hash(object, pindex)
* The object and page must be locked, and must be splhigh.
*/

inline void
__inline void
vm_page_insert(m, object, pindex)
register vm_page_t m;
register vm_object_t object;
@ -432,7 +431,7 @@ vm_page_insert(m, object, pindex)
* The object and page must be locked, and at splhigh.
*/

inline void
__inline void
vm_page_remove(m)
register vm_page_t m;
{
@ -487,14 +486,13 @@ vm_page_lookup(object, pindex)

bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

s = splhigh();
for (m = bucket->tqh_first; m != NULL; m = m->hashq.tqe_next) {
s = splvm();
for (m = TAILQ_FIRST(bucket); m != NULL; m = TAILQ_NEXT(m,hashq)) {
if ((m->object == object) && (m->pindex == pindex)) {
splx(s);
return (m);
}
}

splx(s);
return (NULL);
}
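Earlier in this file, vm_page_startup doubles vm_page_bucket_count until
it reaches atop(total), so the bucket count is always a power of two and
vm_page_hash can reduce its result with a mask rather than a modulo.
A toy model of that sizing assumption and of the lookup loop above, with
invented names and a deliberately simplified hash mix:

    #include <stddef.h>

    #define NBUCKETS 1024                   /* must be a power of two */

    struct page { void *object; size_t pindex; struct page *hnext; };
    static struct page *buckets[NBUCKETS];

    static size_t
    page_hash(void *object, size_t pindex)
    {
            /* power-of-two table: mask instead of '%' */
            return (((size_t)object / sizeof(void *) + pindex) &
                (NBUCKETS - 1));
    }

    static struct page *
    page_lookup(void *object, size_t pindex)
    {
            struct page *m;

            for (m = buckets[page_hash(object, pindex)]; m != NULL;
                m = m->hnext)
                    if (m->object == object && m->pindex == pindex)
                            return (m);
            return (NULL);
    }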
@ -515,7 +513,7 @@ vm_page_rename(m, new_object, new_pindex)
{
int s;

s = splhigh();
s = splvm();
vm_page_remove(m);
vm_page_insert(m, new_object, new_pindex);
splx(s);
@ -524,7 +522,7 @@ vm_page_rename(m, new_object, new_pindex)
/*
* vm_page_unqueue must be called at splhigh();
*/
static inline void
__inline void
vm_page_unqueue(vm_page_t m)
{
int queue = m->queue;
@ -575,19 +573,19 @@ vm_page_alloc(object, pindex, page_req)
page_req = VM_ALLOC_SYSTEM;
};

s = splhigh();
s = splvm();

switch (page_req) {

case VM_ALLOC_NORMAL:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_queue_free.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = vm_page_queue_zero.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
} else {
m = vm_page_queue_cache.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
pagedaemon_wakeup();
@ -598,14 +596,14 @@ vm_page_alloc(object, pindex, page_req)

case VM_ALLOC_ZERO:
if (cnt.v_free_count >= cnt.v_free_reserved) {
m = vm_page_queue_zero.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_zero);
if (m) {
--vm_page_zero_count;
} else {
m = vm_page_queue_free.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_free);
}
} else {
m = vm_page_queue_cache.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
pagedaemon_wakeup();
@ -618,13 +616,13 @@ vm_page_alloc(object, pindex, page_req)
if ((cnt.v_free_count >= cnt.v_free_reserved) ||
((cnt.v_cache_count == 0) &&
(cnt.v_free_count >= cnt.v_interrupt_free_min))) {
m = vm_page_queue_free.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = vm_page_queue_zero.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
} else {
m = vm_page_queue_cache.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_cache);
if (m == NULL) {
splx(s);
pagedaemon_wakeup();
@ -635,10 +633,10 @@ vm_page_alloc(object, pindex, page_req)

case VM_ALLOC_INTERRUPT:
if (cnt.v_free_count > 0) {
m = vm_page_queue_free.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_free);
if (m == NULL) {
--vm_page_zero_count;
m = vm_page_queue_zero.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_zero);
}
} else {
splx(s);
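Every VM_ALLOC_* case above applies the same fallback order: take a
truly free page, fall back to a pre-zeroed one, and only when free
memory has dropped below the reserve kept for interrupt time does the
allocator raid the cache queue (waking the pageout daemon if even that
is empty).  A condensed sketch of the VM_ALLOC_NORMAL policy, with
invented queue and counter names:

    #include <sys/queue.h>
    #include <stddef.h>

    struct page { TAILQ_ENTRY(page) pageq; };
    TAILQ_HEAD(pq, page);

    static struct pq queue_free = TAILQ_HEAD_INITIALIZER(queue_free);
    static struct pq queue_zero = TAILQ_HEAD_INITIALIZER(queue_zero);
    static struct pq queue_cache = TAILQ_HEAD_INITIALIZER(queue_cache);
    static int nfree, nfree_reserved;

    static struct page *
    dequeue(struct pq *q)
    {
            struct page *m = TAILQ_FIRST(q);

            if (m != NULL)
                    TAILQ_REMOVE(q, m, pageq);
            return (m);
    }

    struct page *
    alloc_page_normal(void)
    {
            struct page *m;

            if (nfree >= nfree_reserved) {
                    if ((m = dequeue(&queue_free)) == NULL)
                            m = dequeue(&queue_zero); /* already zeroed */
            } else
                    m = dequeue(&queue_cache);        /* reclaimable */
            return (m);     /* NULL: caller wakes the pageout daemon */
    }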
@ -663,8 +661,8 @@ vm_page_alloc(object, pindex, page_req)
m->flags = PG_BUSY;
}
m->wire_count = 0;
m->hold_count = 0;
m->act_count = 0;
m->hold_count = 0;
m->busy = 0;
m->valid = 0;
m->dirty = 0;
@ -688,114 +686,35 @@ vm_page_alloc(object, pindex, page_req)
}

/*
* This interface is for merging with malloc() someday.
* Even if we never implement compaction so that contiguous allocation
* works after initialization time, malloc()'s data structures are good
* for statistics and for allocations of less than a page.
* vm_page_activate:
*
* Put the specified page on the active list (if appropriate).
*
* The page queues must be locked.
*/
void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
unsigned long size; /* should be size_t here and for malloc() */
int type;
int flags;
unsigned long low;
unsigned long high;
unsigned long alignment;
unsigned long boundary;
void
vm_page_activate(m)
register vm_page_t m;
{
int i, s, start;
vm_offset_t addr, phys, tmp_addr;
vm_page_t pga = vm_page_array;
int s;

size = round_page(size);
if (size == 0)
panic("vm_page_alloc_contig: size must not be 0");
if ((alignment & (alignment - 1)) != 0)
panic("vm_page_alloc_contig: alignment must be a power of 2");
if ((boundary & (boundary - 1)) != 0)
panic("vm_page_alloc_contig: boundary must be a power of 2");
s = splvm();
if (m->queue == PQ_ACTIVE)
panic("vm_page_activate: already active");

start = 0;
s = splhigh();
again:
/*
* Find first page in array that is free, within range, aligned, and
* such that the boundary won't be crossed.
*/
for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
if ((pga[i].queue == PQ_FREE) &&
(phys >= low) && (phys < high) &&
((phys & (alignment - 1)) == 0) &&
(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
break;
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;

vm_page_unqueue(m);

if (m->wire_count == 0) {
if (m->act_count < 5)
m->act_count = 5;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
cnt.v_active_count++;
}

/*
* If the above failed or we will exceed the upper bound, fail.
*/
if ((i == cnt.v_page_count) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
splx(s);
return (NULL);
}
start = i;

/*
* Check successive pages for contiguous and free.
*/
for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
(pga[i].queue != PQ_FREE)) {
start++;
goto again;
}
}

/*
* We've found a contiguous chunk that meets are requirements.
* Allocate kernel VM, unfree and assign the physical pages to it and
* return kernel VM pointer.
*/
tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
if (addr == 0) {
splx(s);
return (NULL);
}

for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];

TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
cnt.v_free_count--;
m->valid = VM_PAGE_BITS_ALL;
m->flags = 0;
m->dirty = 0;
m->wire_count = 0;
m->act_count = 0;
m->busy = 0;
m->queue = PQ_NONE;
vm_page_insert(m, kernel_object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
vm_page_wire(m);
pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
tmp_addr += PAGE_SIZE;
}

splx(s);
return ((void *)addr);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
vm_offset_t size;
vm_offset_t low;
vm_offset_t high;
vm_offset_t alignment;
{
return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
alignment, 0ul));
}

/*
@ -813,7 +732,7 @@ vm_page_free(m)
int s;
int flags = m->flags;

s = splhigh();
s = splvm();
if (m->busy || (flags & PG_BUSY) || (m->queue == PQ_FREE)) {
printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d)\n",
m->pindex, m->busy, (flags & PG_BUSY) ? 1 : 0);
@ -824,7 +743,8 @@ vm_page_free(m)
}

if (m->hold_count) {
panic("freeing held page, count=%d", m->hold_count);
panic("freeing held page, count=%d, pindex=%d(0x%x)",
m->hold_count, m->pindex, m->pindex);
}

vm_page_remove(m);
@ -840,7 +760,19 @@ vm_page_free(m)
m->wire_count = 0;
}
m->queue = PQ_FREE;
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);

/*
* If the pageout process is grabbing the page, it is likely
* that the page is NOT in the cache. It is more likely that
* the page will be partially in the cache if it is being
* explicitly freed.
*/
if (curproc == pageproc) {
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
} else {
TAILQ_INSERT_HEAD(&vm_page_queue_free, m, pageq);
}

splx(s);
/*
* if pageout daemon needs pages, then tell it that there are
@ -859,7 +791,6 @@ vm_page_free(m)
*/
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
wakeup(&cnt.v_free_count);
wakeup(&proc0);
}
} else {
splx(s);
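The new insertion policy in vm_page_free distinguishes who is freeing:
pages released by the pageout daemon are presumed cold and go to the
tail of the free queue, while explicitly freed pages may still be warm
in the CPU cache and go to the head, where the next allocation will
reuse them first.  Just that decision, in isolation and with invented
names:

    #include <sys/queue.h>

    struct page { TAILQ_ENTRY(page) pageq; };
    TAILQ_HEAD(pq, page);
    static struct pq queue_free = TAILQ_HEAD_INITIALIZER(queue_free);

    void
    free_page(struct page *m, int from_pagedaemon)
    {
            if (from_pagedaemon)            /* cold: hand out last */
                    TAILQ_INSERT_TAIL(&queue_free, m, pageq);
            else                            /* cache-warm: hand out first */
                    TAILQ_INSERT_HEAD(&queue_free, m, pageq);
    }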
@ -884,7 +815,7 @@ vm_page_wire(m)
int s;

if (m->wire_count == 0) {
s = splhigh();
s = splvm();
vm_page_unqueue(m);
splx(s);
cnt.v_wire_count++;
@ -907,56 +838,23 @@ vm_page_unwire(m)
{
int s;

s = splhigh();
s = splvm();

if (m->wire_count > 0)
m->wire_count--;

if (m->wire_count == 0) {
cnt.v_wire_count--;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
if( m->act_count < ACT_MAX)
m->act_count += 1;
cnt.v_active_count++;
}
splx(s);
}

/*
* vm_page_activate:
*
* Put the specified page on the active list (if appropriate).
*
* The page queues must be locked.
*/
void
vm_page_activate(m)
register vm_page_t m;
{
int s;

s = splhigh();
if (m->queue == PQ_ACTIVE)
panic("vm_page_activate: already active");

if (m->queue == PQ_CACHE)
cnt.v_reactivated++;

vm_page_unqueue(m);

if (m->wire_count == 0) {
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
if (m->act_count < 5)
m->act_count = 5;
else if( m->act_count < ACT_MAX)
m->act_count += 1;
cnt.v_active_count++;
}
splx(s);
}

/*
* vm_page_deactivate:
*
@ -982,7 +880,7 @@ vm_page_deactivate(m)
if (m->queue == PQ_INACTIVE)
return;

spl = splhigh();
spl = splvm();
if (m->wire_count == 0 && m->hold_count == 0) {
if (m->queue == PQ_CACHE)
cnt.v_reactivated++;
@ -990,7 +888,6 @@ vm_page_deactivate(m)
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->queue = PQ_INACTIVE;
cnt.v_inactive_count++;
m->act_count = 0;
}
splx(spl);
}
@ -1014,7 +911,7 @@ vm_page_cache(m)
return;

vm_page_protect(m, VM_PROT_NONE);
s = splhigh();
s = splvm();
vm_page_unqueue(m);
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
@ -1030,35 +927,6 @@ vm_page_cache(m)
splx(s);
}

/*
* vm_page_zero_fill:
*
* Zero-fill the specified page.
* Written as a standard pagein routine, to
* be used by the zero-fill object.
*/
boolean_t
vm_page_zero_fill(m)
vm_page_t m;
{
pmap_zero_page(VM_PAGE_TO_PHYS(m));
return (TRUE);
}

/*
* vm_page_copy:
*
* Copy one page to another
*/
void
vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
* mapping function for valid bits or for dirty bits in
@ -1126,8 +994,6 @@ vm_page_is_valid(m, base, size)
return 0;
}

void
vm_page_test_dirty(m)
vm_page_t m;
@ -1138,6 +1004,115 @@ vm_page_test_dirty(m)
}
}
/*
* This interface is for merging with malloc() someday.
* Even if we never implement compaction so that contiguous allocation
* works after initialization time, malloc()'s data structures are good
* for statistics and for allocations of less than a page.
*/
void *
contigmalloc(size, type, flags, low, high, alignment, boundary)
unsigned long size; /* should be size_t here and for malloc() */
int type;
int flags;
unsigned long low;
unsigned long high;
unsigned long alignment;
unsigned long boundary;
{
int i, s, start;
vm_offset_t addr, phys, tmp_addr;
vm_page_t pga = vm_page_array;

size = round_page(size);
if (size == 0)
panic("vm_page_alloc_contig: size must not be 0");
if ((alignment & (alignment - 1)) != 0)
panic("vm_page_alloc_contig: alignment must be a power of 2");
if ((boundary & (boundary - 1)) != 0)
panic("vm_page_alloc_contig: boundary must be a power of 2");

start = 0;
s = splvm();
again:
/*
* Find first page in array that is free, within range, aligned, and
* such that the boundary won't be crossed.
*/
for (i = start; i < cnt.v_page_count; i++) {
phys = VM_PAGE_TO_PHYS(&pga[i]);
if ((pga[i].queue == PQ_FREE) &&
(phys >= low) && (phys < high) &&
((phys & (alignment - 1)) == 0) &&
(((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
break;
}

/*
* If the above failed or we will exceed the upper bound, fail.
*/
if ((i == cnt.v_page_count) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
splx(s);
return (NULL);
}
start = i;

/*
* Check successive pages for contiguous and free.
*/
for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
if ((VM_PAGE_TO_PHYS(&pga[i]) !=
(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
(pga[i].queue != PQ_FREE)) {
start++;
goto again;
}
}

/*
* We've found a contiguous chunk that meets are requirements.
* Allocate kernel VM, unfree and assign the physical pages to it and
* return kernel VM pointer.
*/
tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);
if (addr == 0) {
splx(s);
return (NULL);
}

for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];

TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
cnt.v_free_count--;
m->valid = VM_PAGE_BITS_ALL;
m->flags = 0;
m->dirty = 0;
m->wire_count = 0;
m->busy = 0;
m->queue = PQ_NONE;
vm_page_insert(m, kernel_object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
vm_page_wire(m);
pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
tmp_addr += PAGE_SIZE;
}

splx(s);
return ((void *)addr);
}

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
vm_offset_t size;
vm_offset_t low;
vm_offset_t high;
vm_offset_t alignment;
{
return ((vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, low, high,
alignment, 0ul));
}
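contigmalloc() leans on power-of-two arithmetic throughout: alignment
and boundary are validated with the classic (x & (x - 1)) == 0 test, and
the XOR expression in the search loop checks that the first and last
byte of a candidate range agree in every bit above the boundary size,
i.e. that the range does not straddle a boundary-aligned line.  The same
check in isolation, with two worked cases:

    #include <assert.h>

    /* True when [phys, phys + size - 1] crosses a boundary-aligned
     * line; boundary must be a power of two.  XOR leaves bits set only
     * where the two addresses differ; masking off the low bits asks
     * whether both ends fall in the same boundary-sized window. */
    static int
    crosses_boundary(unsigned long phys, unsigned long size,
        unsigned long boundary)
    {
            return (((phys ^ (phys + size - 1)) & ~(boundary - 1)) != 0);
    }

    int
    main(void)
    {
            /* 0x3000..0x3fff sits inside one 64KB window... */
            assert(!crosses_boundary(0x3000, 0x1000, 0x10000));
            /* ...but 0xf000..0x10fff straddles the line at 0x10000. */
            assert(crosses_boundary(0xf000, 0x2000, 0x10000));
            return (0);
    }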
#ifdef DDB
void
DDB_print_page_info(void)
@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.69 1996/03/28 04:53:28 dyson Exp $
* $Id: vm_pageout.c,v 1.70 1996/04/11 21:05:25 bde Exp $
*/

/*
@ -138,8 +138,6 @@ extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;

#define MAXSCAN 1024 /* maximum number of pages to scan in queues */

#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)

#define VM_PAGEOUT_PAGE_COUNT 16
@ -415,9 +413,9 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
* scan the objects entire memory queue
*/
rcount = object->resident_page_count;
p = object->memq.tqh_first;
p = TAILQ_FIRST(&object->memq);
while (p && (rcount-- > 0)) {
next = p->listq.tqe_next;
next = TAILQ_NEXT(p, listq);
cnt.v_pdpages++;
if (p->wire_count != 0 ||
p->hold_count != 0 ||
@ -434,26 +432,9 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
if (p->queue == PQ_ACTIVE) {
if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
(p->flags & PG_REFERENCED) == 0) {
p->act_count -= min(p->act_count, ACT_DECLINE);
/*
* if the page act_count is zero -- then we
* deactivate
*/
if (!p->act_count) {
if (!map_remove_only)
vm_page_deactivate(p);
vm_page_protect(p, VM_PROT_NONE);
/*
* else if on the next go-around we
* will deactivate the page we need to
* place the page on the end of the
* queue to age the other pages in
* memory.
*/
} else {
TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
}
vm_page_protect(p, VM_PROT_NONE);
if (!map_remove_only)
vm_page_deactivate(p);
/*
* see if we are done yet
*/
@ -471,8 +452,6 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
*/
pmap_clear_reference(VM_PAGE_TO_PHYS(p));
p->flags &= ~PG_REFERENCED;
if (p->act_count < ACT_MAX)
p->act_count += ACT_ADVANCE;

TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
@ -544,9 +523,12 @@ vm_pageout_scan()
vm_object_t object;
int force_wakeup = 0;
int vnodes_skipped = 0;
int usagefloor;
int i;

pages_freed = 0;

/*
* Start scanning the inactive queue for pages we can free. We keep
* scanning until we have enough free pages or we have scanned through
@ -559,13 +541,14 @@ vm_pageout_scan()

rescan1:
maxscan = cnt.v_inactive_count;
m = vm_page_queue_inactive.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_inactive);
while ((m != NULL) && (maxscan-- > 0) &&
((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_cache_min + cnt.v_free_target))) {
((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_cache_min + cnt.v_free_target))) {
vm_page_t next;

cnt.v_pdpages++;
next = m->pageq.tqe_next;
next = TAILQ_NEXT(m, pageq);

#if defined(VM_DIAGNOSE)
if (m->queue != PQ_INACTIVE) {
@ -575,7 +558,8 @@ rescan1:
#endif

/*
* dont mess with busy pages
* Dont mess with busy pages, keep in the front of the
* queue, most likely are being paged out.
*/
if (m->busy || (m->flags & PG_BUSY)) {
m = next;
@ -600,8 +584,6 @@ rescan1:
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
if (m->act_count < ACT_MAX)
m->act_count += ACT_ADVANCE;
m = next;
continue;
}
@ -681,14 +663,11 @@ rescan1:
page_shortage = 1;
}
}
maxscan = MAXSCAN;
pcount = cnt.v_active_count;
m = vm_page_queue_active.tqh_first;
while ((m != NULL) && (maxscan > 0) &&
(pcount-- > 0) && (page_shortage > 0)) {

cnt.v_pdpages++;
next = m->pageq.tqe_next;
pcount = cnt.v_active_count;
m = TAILQ_FIRST(&vm_page_queue_active);
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
next = TAILQ_NEXT(m, pageq);

/*
* Don't deactivate pages that are busy.
@ -701,54 +680,47 @@ rescan1:
m = next;
continue;
}
if (m->object->ref_count &&
((m->flags & PG_REFERENCED) ||
pmap_is_referenced(VM_PAGE_TO_PHYS(m))) ) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->flags &= ~PG_REFERENCED;
if (m->act_count < ACT_MAX) {
m->act_count += ACT_ADVANCE;

/*
* The count for pagedaemon pages is done after checking the
* page for eligbility...
*/
cnt.v_pdpages++;
if ((m->flags & PG_REFERENCED) == 0) {
if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->flags |= PG_REFERENCED;
}
} else {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
}
if ( (m->object->ref_count != 0) &&
(m->flags & PG_REFERENCED) ) {
m->flags &= ~PG_REFERENCED;
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
} else {
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->act_count -= min(m->act_count, ACT_DECLINE);

/*
* if the page act_count is zero -- then we deactivate
*/
if (!m->act_count && (page_shortage > 0)) {
if (m->object->ref_count == 0) {
--page_shortage;
vm_page_test_dirty(m);
if (m->dirty == 0) {
m->act_count = 0;
vm_page_cache(m);
} else {
vm_page_deactivate(m);
}
if (page_shortage > 0) {
--page_shortage;
vm_page_test_dirty(m);
if (m->dirty == 0) {
vm_page_cache(m);
} else {
vm_page_protect(m, VM_PROT_NONE);
vm_page_deactivate(m);
--page_shortage;
}
} else if (m->act_count) {
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
}
}
maxscan--;
m = next;
}

/*
* We try to maintain some *really* free pages, this allows interrupt
* code to be guaranteed space.
*/
while (cnt.v_free_count < cnt.v_free_reserved) {
m = vm_page_queue_cache.tqh_first;
m = TAILQ_FIRST(&vm_page_queue_cache);
if (!m)
break;
vm_page_free(m);
@ -770,23 +742,13 @@ rescan1:
}
}
#ifndef NO_SWAPPING
/*
* now swap processes out if we are in low memory conditions
*/
if (!swap_pager_full && vm_swap_size &&
vm_pageout_req_swapout == 0) {
vm_pageout_req_swapout = 1;
if (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target) {
vm_req_vmdaemon();
vm_pageout_req_swapout = 1;
}
#endif
}

#ifndef NO_SWAPPING
if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
(cnt.v_inactive_target + cnt.v_free_min)) {
vm_req_vmdaemon();
}
#endif

/*
* make sure that we have swap space -- if we are low on memory and
@ -883,22 +845,23 @@ vm_pageout()
* The pageout daemon is never done, so loop forever.
*/
while (TRUE) {
int s = splhigh();

int s = splvm();
if (!vm_pages_needed ||
((cnt.v_free_count >= cnt.v_free_reserved) &&
(cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
vm_pages_needed = 0;
tsleep(&vm_pages_needed, PVM, "psleep", 0);
} else if (!vm_pages_needed) {
tsleep(&vm_pages_needed, PVM, "psleep", hz/3);
}
if (vm_pages_needed)
cnt.v_pdwakeups++;
vm_pages_needed = 0;
splx(s);
cnt.v_pdwakeups++;
vm_pager_sync();
vm_pageout_scan();
vm_pager_sync();
wakeup(&cnt.v_free_count);
wakeup(kmem_map);
}
}
@ -908,7 +871,7 @@ vm_req_vmdaemon()
{
static int lastrun = 0;

if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
wakeup(&vm_daemon_needed);
lastrun = ticks;
}
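This hunk relaxes the vmdaemon wakeup rate limit from hz/10 ticks
(roughly 100 ms) to a full hz (roughly one second); the ticks < lastrun
arm catches the tick counter wrapping past lastrun, so a wrap can never
suppress wakeups indefinitely.  The same shape in isolation -- the
wakeup hook is hypothetical:

    static int lastrun;                     /* ticks at last wakeup */
    extern void wakeup_daemon(void);        /* hypothetical hook */

    void
    maybe_wake_daemon(int ticks, int hz)
    {
            if ((ticks > lastrun + hz) ||   /* >= ~1 s since last */
                (ticks < lastrun)) {        /* counter wrapped */
                    wakeup_daemon();
                    lastrun = ticks;
            }
    }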
@ -978,7 +941,7 @@ vm_daemon()
* we remove cached objects that have no RSS...
*/
restart:
object = vm_object_cached_list.tqh_first;
object = TAILQ_FIRST(&vm_object_cached_list);
while (object) {
/*
* if there are no resident pages -- get rid of the object
@ -988,7 +951,7 @@ restart:
pager_cache(object, FALSE);
goto restart;
}
object = object->cached_list.tqe_next;
object = TAILQ_NEXT(object, cached_list);
}
}
}
@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.21 1995/12/14 09:55:11 phk Exp $
* $Id: vm_pager.c,v 1.22 1996/05/03 21:01:53 phk Exp $
*/

/*
@ -249,7 +249,7 @@ vm_pager_object_lookup(pg_list, handle)
{
register vm_object_t object;

for (object = pg_list->tqh_first; object != NULL; object = object->pager_object_list.tqe_next)
for (object = TAILQ_FIRST(pg_list); object != NULL; object = TAILQ_NEXT(object,pager_object_list))
if (object->handle == handle)
return (object);
return (NULL);
@ -288,7 +288,7 @@ getpbuf()

s = splbio();
/* get a bp from the swap buffer header pool */
while ((bp = bswlist.tqh_first) == NULL) {
while ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
bswneeded = 1;
tsleep(&bswneeded, PVM, "wswbuf", 0);
}
@ -313,7 +313,7 @@ trypbuf()
struct buf *bp;

s = splbio();
if ((bp = bswlist.tqh_first) == NULL) {
if ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
splx(s);
return NULL;
}
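getpbuf() and trypbuf() above are the blocking and non-blocking faces of
the same swap-buffer pool: the first records that a waiter exists and
sleeps until a buffer is returned, the second gives up immediately.  A
paired sketch of the two entry points in the same idiom, with an
invented pool in place of bswlist:

    /* Kernel-idiom sketch; pool, pool_wanted and the spl/tsleep
     * environment are assumed, not real declarations. */
    struct buf *
    pool_get_wait(void)
    {
            struct buf *bp;
            int s;

            s = splbio();
            while ((bp = TAILQ_FIRST(&pool)) == NULL) {
                    pool_wanted = 1;        /* ask releasers to wake us */
                    tsleep(&pool_wanted, PVM, "poolwt", 0);
            }
            TAILQ_REMOVE(&pool, bp, b_freelist);
            splx(s);
            return (bp);
    }

    struct buf *
    pool_get_try(void)
    {
            struct buf *bp;
            int s;

            s = splbio();
            if ((bp = TAILQ_FIRST(&pool)) != NULL)
                    TAILQ_REMOVE(&pool, bp, b_freelist);
            splx(s);
            return (bp);                    /* NULL: pool empty */
    }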