Mirror of https://github.com/freebsd/freebsd-src.git, synced 2024-12-04 10:19:26 +00:00.
Incorporated post-1.1.5 work from John Dyson. This includes performance
improvements via the new routines pmap_qenter/pmap_qremove and
pmap_kenter/pmap_kremove. These routines allow fast mapping of pages for
those architectures that have "normal" MMUs. Also included is a fix to the
pageout daemon to properly check a queue end condition.

Submitted by: John Dyson
This commit is contained in:
parent 93f6448c49
commit 16f62314cd

Notes:
svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=1887
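The heart of the change is the batched kernel-mapping interface. Where the
pagers below used to call pmap_enter()/pmap_remove() once per page (taking
the full PV-list path each time), they now map and unmap a whole run of
wired pages with one call each way. A minimal sketch of the calling
pattern, using the same names as the hunks below (illustrative only, not
part of the commit):

	/* map `count` wired pages m[0..count-1] at kernel va `kva` */
	pmap_qenter(kva, m, count);

	/* ... perform the transfer through the kva window ... */

	/* tear the temporary mappings down again */
	pmap_qremove(kva, count);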
machdep.c:

@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.45 1994/08/03 02:45:26 davidg Exp $
* $Id: machdep.c,v 1.46 1994/08/04 06:10:27 davidg Exp $
*/

#include "npx.h"

@@ -248,8 +248,8 @@ again:
freebufspace = bufpages * NBPG;
if (nswbuf == 0) {
nswbuf = (nbuf / 2) &~ 1; /* force even */
if (nswbuf > 256)
nswbuf = 256; /* sanity */
if (nswbuf > 64)
nswbuf = 64; /* sanity */
}
valloc(swbuf, struct buf, nswbuf);
valloc(buf, struct buf, nbuf);

@@ -284,19 +284,21 @@ again:
panic("startup: table size inconsistency");

clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
(nbuf*MAXBSIZE) + VM_PHYS_SIZE + maxbkva + pager_map_size, TRUE);

io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
pager_map_size, TRUE);

(nbuf*MAXBSIZE) + (nswbuf*MAXPHYS) +
maxbkva + pager_map_size, TRUE);
buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
(nbuf * MAXBSIZE), TRUE);
(nbuf*MAXBSIZE), TRUE);
pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
(nswbuf*MAXPHYS) + pager_map_size, TRUE);
io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);

#if 0
/*
* Allocate a submap for physio
*/
phys_map = kmem_suballoc(clean_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
#endif

/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size

@@ -334,6 +336,7 @@ again:
* Set up buffers, so they can be used to read disk labels.
*/
bufinit();
vm_pager_bufferinit();

/*
* Configure the system.

pmap.c:

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.26 1994/05/25 08:54:35 rgrimes Exp $
* $Id: pmap.c,v 1.27 1994/08/03 02:45:28 davidg Exp $
*/

/*

@@ -149,11 +149,17 @@ static inline void *vm_get_pmap();
static inline void vm_put_pmap();
inline void pmap_use_pt();
inline void pmap_unuse_pt();
inline pt_entry_t * const pmap_pte();
inline pt_entry_t * pmap_pte();
static inline pv_entry_t get_pv_entry();
void pmap_alloc_pv_entry();
void pmap_clear_modify();
void i386_protection_init();
static void i386_protection_init();

void pmap_kenter __P((vm_offset_t, vm_offset_t));
void pmap_kremove __P((vm_offset_t));
void pmap_qenter __P((vm_offset_t, vm_page_t *, int));
void pmap_qremove __P((vm_offset_t, int));

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

@@ -693,8 +699,7 @@ pmap_alloc_pv_entry()
/*
* let the kernel see it
*/
pmap_enter(vm_map_pmap(kernel_map), pvva,
VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT,1);
pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));

entry = (pv_entry_t) pvva;
/*

@@ -1335,6 +1340,8 @@ pmap_qremove(va, count)

/*
* add a wired page to the kva
* note that in order for the mapping to take effect -- you
* should do a tlbflush after doing the pmap_kenter...
*/
void
pmap_kenter(va, pa)

@@ -1342,75 +1349,25 @@ pmap_kenter(va, pa)
register vm_offset_t pa;
{
register pt_entry_t *pte;
register pv_entry_t pv, npv;
vm_offset_t opa;
int s;

/*
* Enter on the PV list if part of our managed memory
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
*/

pte = vtopte(va);

opa = pmap_pte_pa(pte);
/*
* Mapping has not changed, must be protection or wiring change.
*/
if (opa == pa) {
/*
* Wiring change, just update stats.
* We don't worry about wiring PT pages as they remain
* resident as long as there are valid mappings in them.
* Hence, if a user page is wired, the PT page will be also.
*/
if (!pmap_pte_w(pte)) {
kernel_pmap->pm_stats.wired_count++;
}
goto validate;
}

if (opa) {
pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

pv = pa_to_pvh(pa);
s = splhigh();
/*
* No entries yet, use header as the first entry
*/
if (pv->pv_pmap == NULL) {
pv->pv_va = va;
pv->pv_pmap = kernel_pmap;
pv->pv_next = NULL;
}
/*
* There is at least one other VA mapping this page.
* Place this entry after the header.
*/
else {
npv = get_pv_entry();
npv->pv_va = va;
npv->pv_pmap = kernel_pmap;
npv->pv_next = pv->pv_next;
pv->pv_next = npv;
}
splx(s);

/*
* Increment counters
*/
kernel_pmap->pm_stats.resident_count++;

validate:

/*
* Now validate mapping with desired protection/wiring.
*/
*pte = (pt_entry_t) ( (int) (pa | PG_RW | PG_V | PG_W));
}

/*
* remove a page from the kernel pagetables
*/
void
pmap_kremove( va)
vm_offset_t va;
{
register pt_entry_t *pte;
pte = vtopte(va);

*pte = (pt_entry_t) 0;
tlbflush();
}

/*
* this code makes some *MAJOR* assumptions:
* 1. Current pmap & pmap exists.

@@ -1799,6 +1756,10 @@ pmap_testbit(pa, bit)
}
}
}
if( !pv->pv_pmap) {
printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
continue;
}
pte = pmap_pte(pv->pv_pmap, pv->pv_va);
if ((int) *pte & bit) {
splx(s);

@@ -1846,6 +1807,10 @@ pmap_changebit(pa, bit, setem)
continue;
}

if( !pv->pv_pmap) {
printf("Null pmap (cb) at va: 0x%lx\n", va);
continue;
}
pte = pmap_pte(pv->pv_pmap, va);
if (setem)
(int) npte = (int) *pte | bit;
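For reference, the fast path above reduces a single-page kernel mapping to
one PTE store; a condensed sketch of what pmap_kenter() boils down to after
this change (drawn from the hunk above, not the verbatim file):

	void
	pmap_kenter(vm_offset_t va, vm_offset_t pa)
	{
		pt_entry_t *pte = vtopte(va);	/* PTE backing this kernel va */

		/* wired, valid, read/write mapping; caller flushes the TLB */
		*pte = (pt_entry_t) (pa | PG_RW | PG_V | PG_W);
	}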
kern_physio.c:

@@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id$
* $Id: kern_physio.c,v 1.3 1994/08/02 07:42:05 davidg Exp $
*/

#include <sys/param.h>

@@ -38,10 +38,12 @@ physio(strategy, bp, dev, rw, minp, uio)
struct uio *uio;
{
int i;
int bp_alloc = (bp == 0);
int bufflags = rw?B_READ:0;
int error;
int spl;
caddr_t sa;
int bp_alloc = (bp == 0);
struct buf *bpa;

/*
* keep the process from being swapped

@@ -49,10 +51,8 @@ physio(strategy, bp, dev, rw, minp, uio)
curproc->p_flag |= P_PHYSIO;

/* create and build a buffer header for a transfer */

if (bp_alloc) {
bp = (struct buf *)getpbuf();
} else {
bpa = (struct buf *)getpbuf();
if (!bp_alloc) {
spl = splbio();
while (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;

@@ -60,8 +60,14 @@ physio(strategy, bp, dev, rw, minp, uio)
}
bp->b_flags |= B_BUSY;
splx(spl);
} else {
bp = bpa;
}

/*
* get a copy of the kva from the physical buffer
*/
sa = bpa->b_data;
bp->b_proc = curproc;
bp->b_dev = dev;
error = bp->b_error = 0;

@@ -76,6 +82,11 @@ physio(strategy, bp, dev, rw, minp, uio)
bp->b_flags = B_BUSY | B_PHYS | B_CALL | bufflags;
bp->b_iodone = physwakeup;
bp->b_data = uio->uio_iov[i].iov_base;
/*
* pass in the kva from the physical buffer
* for the temporary kernel mapping.
*/
bp->b_saveaddr = sa;
bp->b_blkno = btodb(uio->uio_offset);

@@ -123,9 +134,8 @@ physio(strategy, bp, dev, rw, minp, uio)

doerror:
if (bp_alloc) {
relpbuf(bp);
} else {
relpbuf(bpa);
if (!bp_alloc) {
bp->b_flags &= ~(B_BUSY|B_PHYS);
if( bp->b_flags & B_WANTED) {
bp->b_flags &= ~B_WANTED;
vfs_bio.c:

@@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: vfs_bio.c,v 1.4 1994/08/02 07:43:13 davidg Exp $
* $Id: vfs_bio.c,v 1.5 1994/08/04 19:43:13 davidg Exp $
*/

#include <sys/param.h>

@@ -300,7 +300,8 @@ brelse(struct buf *bp)
needsbuffer = 0;
wakeup((caddr_t)&needsbuffer);
}
/* anyone need this very block? */

/* anyone need this block? */
if (bp->b_flags & B_WANTED) {
bp->b_flags &= ~(B_WANTED|B_AGE);
wakeup((caddr_t)bp);

@@ -321,13 +322,14 @@ brelse(struct buf *bp)
panic("brelse: free buffer onto another queue???");

/* enqueue */
/* buffers with junk contents */
/* buffers with no memory */
if(bp->b_bufsize == 0) {
bp->b_qindex = QUEUE_EMPTY;
TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
LIST_REMOVE(bp, b_hash);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
bp->b_dev = NODEV;
/* buffers with junk contents */
} else if(bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE)) {
bp->b_qindex = QUEUE_AGE;
TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);

@@ -363,8 +365,8 @@ struct buf *
getnewbuf(int slpflag, int slptimeo)
{
struct buf *bp;
int x;
x = splbio();
int s;
s = splbio();
start:
/* can we constitute a new buffer? */
if (bp = bufqueues[QUEUE_EMPTY].tqh_first) {

@@ -387,7 +389,7 @@ tryfree:
/* wait for a free buffer of any kind */
needsbuffer = 1;
tsleep((caddr_t)&needsbuffer, PRIBIO, "newbuf", 0);
splx(x);
splx(s);
return (0);
}

@@ -411,7 +413,7 @@ fillbuf:
bp->b_flags = B_BUSY;
LIST_REMOVE(bp, b_hash);
LIST_INSERT_HEAD(&invalhash, bp, b_hash);
splx(x);
splx(s);
bp->b_dev = NODEV;
bp->b_vp = NULL;
bp->b_blkno = bp->b_lblkno = 0;

@@ -466,10 +468,10 @@ struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
struct buf *bp;
int x;
int s;
struct bufhashhdr *bh;

x = splbio();
s = splbio();
loop:
if (bp = incore(vp, blkno)) {
if (bp->b_flags & B_BUSY) {

@@ -509,7 +511,7 @@ loop:
bh = BUFHASH(vp, blkno);
LIST_INSERT_HEAD(bh, bp, b_hash);
}
splx(x);
splx(s);
return (bp);
}

@@ -564,9 +566,9 @@ allocbuf(struct buf *bp, int size)
int
biowait(register struct buf *bp)
{
int x;
int s;

x = splbio();
s = splbio();
while ((bp->b_flags & B_DONE) == 0)
tsleep((caddr_t)bp, PRIBIO, "biowait", 0);
if((bp->b_flags & B_ERROR) || bp->b_error) {

@@ -580,10 +582,10 @@ biowait(register struct buf *bp)
bp->b_error = EIO;
else
bp->b_flags |= B_ERROR;
splx(x);
splx(s);
return (bp->b_error);
} else {
splx(x);
splx(s);
return (0);
}
}

@@ -604,6 +606,9 @@ biodone(register struct buf *bp)
vwakeup(bp);
}

if (bp->b_flags & B_BOUNCE)
vm_bounce_free(bp);

/* call optional completion function if requested */
if (bp->b_flags & B_CALL) {
bp->b_flags &= ~B_CALL;

@@ -677,6 +682,7 @@ vm_hold_load_pages(vm_offset_t froma, vm_offset_t toa) {
VM_WAIT;
goto tryagain;
}

p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS);
if( !p) {
VM_WAIT;

@@ -684,9 +690,9 @@ vm_hold_load_pages(vm_offset_t froma, vm_offset_t toa) {
}

vm_page_wire(p);
pmap_enter(kernel_pmap, pg, VM_PAGE_TO_PHYS(p),
VM_PROT_READ|VM_PROT_WRITE, 1);
pmap_kenter( pg, VM_PAGE_TO_PHYS(p));
}
pmap_update();
}

void

@@ -697,16 +703,11 @@ vm_hold_free_pages(vm_offset_t froma, vm_offset_t toa) {
vm_offset_t to = round_page(toa);

for(pg = from ; pg < to ; pg += PAGE_SIZE) {
vm_offset_t pa;
pa = pmap_kextract(pg);
if( !pa) {
printf("No pa for va: %x\n", pg);
} else {
p = PHYS_TO_VM_PAGE( pa);
pmap_remove(kernel_pmap, pg, pg + PAGE_SIZE);
vm_page_free(p);
}
p = PHYS_TO_VM_PAGE( pmap_kextract( pg));
pmap_kremove( pg);
vm_page_free(p);
}
pmap_update();
}

void
buf.h:

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)buf.h 8.7 (Berkeley) 1/21/94
* $Id$
* $Id: buf.h,v 1.4 1994/08/02 07:52:39 davidg Exp $
*/

#ifndef _SYS_BUF_H_

@@ -79,7 +79,6 @@ struct buf {
int b_validend; /* Offset of end of valid region. */
daddr_t b_pblkno; /* physical block number */
caddr_t b_savekva; /* saved kva for transfer while bouncing */
TAILQ_HEAD(b_clusterhd,buf) b_cluster; /* low level clustering */
void *b_driver1; /* for private use by the driver */
void *b_driver2; /* for private use by the driver */
void *b_spc;
swap_pager.c:

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id$
* $Id: swap_pager.c,v 1.4 1994/08/02 07:55:13 davidg Exp $
*/

/*

@@ -1030,19 +1030,8 @@ swap_pager_input(swp, m, count, reqpage)
*/

spc = NULL; /* we might not use an spc data structure */
kva = 0;

/*
* we allocate a new kva for transfers > 1 page
* but for transfers == 1 page, the swap_pager_free list contains
* entries that have pre-allocated kva's (for efficiency).
*/
if (count > 1) {
kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE);
}

if (!kva) {
if (count == 1) {
/*
* if a kva has not been allocated, we can only do a one page transfer,
* so we free the other pages that might have been allocated by

@@ -1077,29 +1066,22 @@ swap_pager_input(swp, m, count, reqpage)
spc = swap_pager_free.tqh_first;
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
kva = spc->spc_kva;
}

/*
* map our page(s) into kva for input
*/
for (i = 0; i < count; i++) {
pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
}
pmap_update();

/*
* Get a swap buffer header and perform the IO
*/
if( spc) {
bp = spc->spc_bp;
bzero(bp, sizeof *bp);
bp->b_spc = spc;
} else {
/*
* Get a swap buffer header to perform the IO
*/
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
}

/*
* map our page(s) into kva for input
*/
pmap_qenter( kva, m, count);

s = splbio();
bp->b_flags = B_BUSY | B_READ | B_CALL;
bp->b_iodone = swap_pager_iodone1;

@@ -1112,12 +1094,6 @@ swap_pager_input(swp, m, count, reqpage)
bp->b_bcount = PAGE_SIZE*count;
bp->b_bufsize = PAGE_SIZE*count;

/*
VHOLD(swapdev_vp);
bp->b_vp = swapdev_vp;
if (swapdev_vp->v_type == VBLK)
bp->b_dev = swapdev_vp->v_rdev;
*/
bgetvp( swapdev_vp, bp);

swp->sw_piip++;

@@ -1153,7 +1129,7 @@ swap_pager_input(swp, m, count, reqpage)
/*
* remove the mapping for kernel virtual
*/
pmap_remove(vm_map_pmap(pager_map), kva, kva + count * PAGE_SIZE);
pmap_qremove( kva, count);

if (spc) {
/*

@@ -1169,10 +1145,6 @@ swap_pager_input(swp, m, count, reqpage)
wakeup((caddr_t)&swap_pager_free);
}
} else {
/*
* free the kernel virtual addresses
*/
kmem_free_wakeup(pager_map, kva, count * PAGE_SIZE);
/*
* release the physical I/O buffer
*/

@@ -1372,6 +1344,11 @@ retrygetspace:
* we allocate a new kva for transfers > 1 page
* but for transfers == 1 page, the swap_pager_free list contains
* entries that have pre-allocated kva's (for efficiency).
* NOTE -- we do not use the physical buffer pool or the
* preallocated associated kva's because of the potential for
* deadlock. This is very subtile -- but deadlocks or resource
* contention must be avoided on pageouts -- or your system will
* sleep (forever) !!!
*/
if ( count > 1) {
kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE);

@@ -1429,10 +1406,7 @@ retrygetspace:
/*
* map our page(s) into kva for I/O
*/
for (i = 0; i < count; i++) {
pmap_kenter( kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
}
pmap_update();
pmap_qenter(kva, m, count);

/*
* get the base I/O offset into the swap file

@@ -1473,12 +1447,7 @@ retrygetspace:
bp->b_un.b_addr = (caddr_t) kva;
bp->b_blkno = reqaddr[0];
bgetvp( swapdev_vp, bp);
/*
VHOLD(swapdev_vp);
bp->b_vp = swapdev_vp;
if (swapdev_vp->v_type == VBLK)
bp->b_dev = swapdev_vp->v_rdev;
*/

bp->b_bcount = PAGE_SIZE*count;
bp->b_bufsize = PAGE_SIZE*count;
swapdev_vp->v_numoutput++;

@@ -1543,7 +1512,7 @@ retrygetspace:
/*
* remove the mapping for kernel virtual
*/
pmap_remove(vm_map_pmap(pager_map), kva, kva + count * PAGE_SIZE);
pmap_qremove( kva, count);

/*
* if we have written the page, then indicate that the page

@@ -1604,11 +1573,11 @@ swap_pager_clean()
*/
while (spc = swap_pager_done.tqh_first) {
if( spc->spc_altkva) {
pmap_remove(vm_map_pmap(pager_map), spc->spc_altkva, spc->spc_altkva + spc->spc_count * PAGE_SIZE);
pmap_qremove( spc->spc_altkva, spc->spc_count);
kmem_free_wakeup(pager_map, spc->spc_altkva, spc->spc_count * PAGE_SIZE);
spc->spc_altkva = 0;
} else {
pmap_remove(vm_map_pmap(pager_map), spc->spc_kva, spc->spc_kva + PAGE_SIZE);
pmap_qremove( spc->spc_kva, 1);
}
swap_pager_finish(spc);
TAILQ_REMOVE(&swap_pager_done, spc, spc_list);

@@ -1742,86 +1711,6 @@ swap_pager_iodone(bp)
splx(s);
}

int bswneeded;
/* TAILQ_HEAD(swqueue, buf) bswlist; */
/*
* allocate a physical buffer
*/
struct buf *
getpbuf() {
int s;
struct buf *bp;

s = splbio();
/* get a bp from the swap buffer header pool */
while ((bp = bswlist.tqh_first) == NULL) {
bswneeded = 1;
tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
}
TAILQ_REMOVE(&bswlist, bp, b_freelist);

splx(s);

bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
return bp;
}

/*
* allocate a physical buffer, if one is available
*/
struct buf *
trypbuf() {
int s;
struct buf *bp;

s = splbio();
if ((bp = bswlist.tqh_first) == NULL) {
splx(s);
return NULL;
}
TAILQ_REMOVE(&bswlist, bp, b_freelist);
splx(s);

bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
return bp;
}

/*
* release a physical buffer
*/
void
relpbuf(bp)
struct buf *bp;
{
int s;

s = splbio();

if (bp->b_rcred != NOCRED) {
crfree(bp->b_rcred);
bp->b_rcred = NOCRED;
}
if (bp->b_wcred != NOCRED) {
crfree(bp->b_wcred);
bp->b_wcred = NOCRED;
}

if (bp->b_vp)
brelvp(bp);

TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

if (bswneeded) {
bswneeded = 0;
wakeup((caddr_t)&bswlist);
}
splx(s);
}

/*
* return true if any swap control structures can be allocated
*/
vm_fault.c:

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_fault.c,v 1.3 1994/08/02 07:55:18 davidg Exp $
*/

/*

@@ -267,11 +267,14 @@ vm_fault(map, vaddr, fault_type, change_wiring)
* wait for it and then retry.
*/
if (m->flags & PG_BUSY) {
int s;
UNLOCK_THINGS;
s = splhigh();
if (m->flags & PG_BUSY) {
m->flags |= PG_WANTED;
tsleep((caddr_t)m,PSWP,"vmpfw",0);
}
splx(s);
vm_object_deallocate(first_object);
goto RetryFault;
}

@@ -282,7 +285,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/

vm_page_lock_queues();
spl = splimp();
spl = splhigh();
if (m->flags & PG_INACTIVE) {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
m->flags &= ~PG_INACTIVE;
vm_kern.c:

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_kern.c,v 1.3 1994/08/02 07:55:22 davidg Exp $
*/

/*

@@ -371,9 +371,13 @@ kmem_malloc(map, size, canwait)
vm_object_lock(kmem_object);
m = vm_page_lookup(kmem_object, offset + i);
vm_object_unlock(kmem_object);
/*
pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
VM_PROT_DEFAULT, TRUE);
*/
pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
}
pmap_update();
vm_map_unlock(map);

vm_map_simplify(map, addr);
vm_pageout.c:

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.5 1994/08/02 07:55:33 davidg Exp $
* $Id: vm_pageout.c,v 1.6 1994/08/04 03:06:47 davidg Exp $
*/

/*

@@ -617,10 +617,12 @@ rescan1:
if (written = vm_pageout_clean(m,0)) {
maxlaunder -= written;
}
if (!next)
break;
/*
* if the next page has been re-activated, start scanning again
*/
if (!next || (next->flags & PG_INACTIVE) == 0)
if ((next->flags & PG_INACTIVE) == 0)
goto rescan1;
} else if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));

@@ -706,7 +708,6 @@ rescan1:
TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
}
}

m = next;
}
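This hunk is the pageout-daemon fix called out in the commit message: the
old test sent a NULL `next` back to `rescan1` instead of terminating the
scan of the inactive queue. As best as can be read from the hunk above, the
loop end now looks like this (an assembled sketch, not the verbatim file):

	if (written = vm_pageout_clean(m, 0)) {
		maxlaunder -= written;
	}
	/* stop cleanly at the end of the inactive queue */
	if (!next)
		break;
	/*
	 * if the next page has been re-activated, start scanning again
	 */
	if ((next->flags & PG_INACTIVE) == 0)
		goto rescan1;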
vm_pager.c:

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id$
* $Id: vm_pager.c,v 1.3 1994/08/02 07:55:35 davidg Exp $
*/

/*

@@ -72,6 +72,8 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/ucred.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

@@ -98,25 +100,20 @@ struct pagerops *dfltpagerops = NULL; /* default pager */
* cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
* (MAXPHYS == 64k) if you want to get the most efficiency.
*/
#define PAGER_MAP_SIZE (4 * 1024 * 1024)
#define PAGER_MAP_SIZE (8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
boolean_t pager_map_wanted;
vm_offset_t pager_sva, pager_eva;
int bswneeded;
vm_offset_t swapbkva; /* swap buffers kva */

void
vm_pager_init()
{
struct pagerops **pgops;

/*
* Allocate a kernel submap for tracking get/put page mappings
*/
/*
pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
PAGER_MAP_SIZE, FALSE);
*/
/*
* Initialize known pagers
*/

@@ -127,6 +124,29 @@ vm_pager_init()
panic("no default pager");
}

void
vm_pager_bufferinit()
{
struct buf *bp;
int i;
bp = swbuf;
/*
* Now set up swap and physical I/O buffer headers.
*/
for (i = 0; i < nswbuf - 1; i++, bp++) {
TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
bp->b_rcred = bp->b_wcred = NOCRED;
bp->b_vnbufs.le_next = NOLIST;
}
bp->b_rcred = bp->b_wcred = NOCRED;
bp->b_vnbufs.le_next = NOLIST;
bp->b_actf = NULL;

swapbkva = kmem_alloc_pageable( pager_map, nswbuf * MAXPHYS);
if( !swapbkva)
panic("Not enough pager_map VM space for physical buffers");
}

/*
* Allocate an instance of a pager of the given type.
* Size, protection and offset parameters are passed in for pagers that

@@ -322,3 +342,85 @@ pager_cache(object, should_cache)

return (KERN_SUCCESS);
}

/*
* allocate a physical buffer
*/
struct buf *
getpbuf() {
int s;
struct buf *bp;

s = splbio();
/* get a bp from the swap buffer header pool */
tryagain:
while ((bp = bswlist.tqh_first) == NULL) {
bswneeded = 1;
tsleep((caddr_t)&bswneeded, PVM, "wswbuf", 0);
}
TAILQ_REMOVE(&bswlist, bp, b_freelist);
splx(s);

bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
return bp;
}

/*
* allocate a physical buffer, if one is available
*/
struct buf *
trypbuf() {
int s;
struct buf *bp;

s = splbio();
if ((bp = bswlist.tqh_first) == NULL) {
splx(s);
return NULL;
}
TAILQ_REMOVE(&bswlist, bp, b_freelist);
splx(s);

bzero(bp, sizeof *bp);
bp->b_rcred = NOCRED;
bp->b_wcred = NOCRED;
bp->b_data = (caddr_t) (MAXPHYS * (bp-swbuf)) + swapbkva;
return bp;
}

/*
* release a physical buffer
*/
void
relpbuf(bp)
struct buf *bp;
{
int s;

s = splbio();

if (bp->b_rcred != NOCRED) {
crfree(bp->b_rcred);
bp->b_rcred = NOCRED;
}
if (bp->b_wcred != NOCRED) {
crfree(bp->b_wcred);
bp->b_wcred = NOCRED;
}

if (bp->b_vp)
brelvp(bp);

TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

if (bswneeded) {
bswneeded = 0;
wakeup((caddr_t)&bswlist);
}
splx(s);
}
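The new vm_pager_bufferinit() above gives every swap buffer header a
permanent MAXPHYS-sized window in pager_map, so getpbuf()/trypbuf() can
hand out a kva without allocating one per transfer. The address is just an
index into that window; a sketch of the computation used above:

	/* bp - swbuf = index of this header within the swbuf[] array */
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;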
vm_swap.c:

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
* $Id$
* $Id: vm_swap.c,v 1.3 1994/08/02 07:55:40 davidg Exp $
*/

#include <sys/param.h>

@@ -58,6 +58,8 @@ int niswdev; /* number of interleaved swap devices */
int niswap; /* size of interleaved swap area */
#endif

int bswneeded;
vm_offset_t swapbkva; /* swap buffers kva */
/*
* Set up swap devices.
* Initialize linked list of free swap

@@ -141,18 +143,6 @@ swapinit()
printf("swfree errno %d\n", error); /* XXX */
panic("swapinit swfree 0");
}

/*
* Now set up swap buffer headers.
*/
for (i = 0; i < nswbuf - 1; i++, sp++) {
TAILQ_INSERT_HEAD(&bswlist, sp, b_freelist);
sp->b_rcred = sp->b_wcred = p->p_ucred;
sp->b_vnbufs.le_next = NOLIST;
}
sp->b_rcred = sp->b_wcred = p->p_ucred;
sp->b_vnbufs.le_next = NOLIST;
sp->b_actf = NULL;
}

void
vnode_pager.c:

@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.2 1994/05/25 09:21:11 rgrimes Exp $
* $Id: vnode_pager.c,v 1.3 1994/08/04 03:06:48 davidg Exp $
*/

/*

@@ -102,6 +102,8 @@ struct pagerops vnodepagerops = {
vnode_pager_haspage
};

static int vnode_pager_input(vn_pager_t vnp, vm_page_t * m, int count, int reqpage);
static int vnode_pager_output(vn_pager_t vnp, vm_page_t * m, int count, int *rtvals);
struct buf * getpbuf();

@@ -518,8 +520,46 @@ void
vnode_pager_iodone(bp)
struct buf *bp;
{
int s = splbio();
bp->b_flags |= B_DONE;
wakeup((caddr_t) bp);
if( bp->b_flags & B_ASYNC) {
vm_offset_t paddr;
vm_page_t m;
vm_object_t obj = 0;
int i;
int npages;

paddr = (vm_offset_t) bp->b_data;
if( bp->b_bufsize != bp->b_bcount)
bzero( bp->b_data + bp->b_bcount,
bp->b_bufsize - bp->b_bcount);

npages = (bp->b_bufsize + PAGE_SIZE - 1) / PAGE_SIZE;
for( i = 0; i < npages; i++) {
m = PHYS_TO_VM_PAGE(pmap_kextract(paddr + i * PAGE_SIZE));
obj = m->object;
if( m) {
m->flags |= PG_CLEAN;
m->flags &= ~(PG_LAUNDRY|PG_FAKE);
PAGE_WAKEUP(m);
} else {
panic("vnode_pager_iodone: page is gone!!!");
}
}
if( obj) {
--obj->paging_in_progress;
if( obj->paging_in_progress == 0)
wakeup((caddr_t) obj);
} else {
panic("vnode_pager_iodone: object is gone???");
}
HOLDRELE(bp->b_vp);
splx(s);
relpbuf(bp);
return;
}
splx(s);
}

/*

@@ -723,7 +763,7 @@ vnode_pager_input(vnp, m, count, reqpage)
{
int i, j;
vm_offset_t kva, foff;
int size;
int size, sizea;
struct proc *p = curproc; /* XXX */
vm_object_t object;
vm_offset_t paging_offset;

@@ -736,7 +776,8 @@ vnode_pager_input(vnp, m, count, reqpage)
int block, offset;

int nbp;
struct buf *bp;
struct buf *bp, *bpa;
int counta;
int s;
int failflag;

@@ -756,33 +797,13 @@ vnode_pager_input(vnp, m, count, reqpage)
* originally, we did not check for an error return value -- assuming
* an fs always has a bmap entry point -- that assumption is wrong!!!
*/
kva = 0;
mapsize = 0;
foff = m[reqpage]->offset + paging_offset;
if (!VOP_BMAP(vp, foff, &dp, 0, 0)) {

/*
* we do not block for a kva, notice we default to a kva
* conservative behavior
*/
kva = kmem_alloc_pageable(pager_map, (mapsize = count * PAGE_SIZE));
if (!kva) {
for (i = 0; i < count; i++) {
if (i != reqpage) {
vnode_pager_freepage(m[i]);
}
}
m[0] = m[reqpage];
kva = kmem_alloc_wait(pager_map, mapsize = PAGE_SIZE);
reqpage = 0;
count = 1;
}
}

/*
* if we can't get a kva or we can't bmap, use old VOP code
* if we can't bmap, use old VOP code
*/
if (!kva) {
if (VOP_BMAP(vp, foff, &dp, 0, 0)) {
for (i = 0; i < count; i++) {
if (i != reqpage) {
vnode_pager_freepage(m[i]);

@@ -798,8 +819,6 @@ vnode_pager_input(vnp, m, count, reqpage)
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) {

kmem_free_wakeup(pager_map, kva, mapsize);

for (i = 0; i < count; i++) {
if (i != reqpage) {
vnode_pager_freepage(m[i]);

@@ -852,12 +871,12 @@ vnode_pager_input(vnp, m, count, reqpage)
if ((amount > 0) && (offset + amount) <= bp->b_bcount) {
bp->b_flags |= B_BUSY;
splx(s);
kva = kmem_alloc_pageable( pager_map, PAGE_SIZE);

/*
* map the requested page
*/
pmap_kenter(kva, VM_PAGE_TO_PHYS(m[reqpage]));
pmap_update();
pmap_qenter(kva, &m[reqpage], 1);

/*
* copy the data from the buffer

@@ -870,7 +889,7 @@ vnode_pager_input(vnp, m, count, reqpage)
/*
* unmap the page and free the kva
*/
pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
pmap_qremove( kva, 1);
kmem_free_wakeup(pager_map, kva, mapsize);

/*

@@ -982,14 +1001,25 @@ vnode_pager_input(vnp, m, count, reqpage)
if (dp->v_type == VBLK || dp->v_type == VCHR)
size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

counta = 0;
if( count*PAGE_SIZE > bsize)
counta = (count - reqpage) - 1;
bpa = 0;
sizea = 0;
if( counta) {
bpa = getpbuf();
count -= counta;
sizea = size - count*PAGE_SIZE;
size = count * PAGE_SIZE;
}

bp = getpbuf();
kva = (vm_offset_t)bp->b_data;

/*
* and map the pages to be read into the kva
*/
for (i = 0; i < count; i++)
pmap_kenter(kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));

pmap_update();
bp = getpbuf();
pmap_qenter(kva, m, count);
VHOLD(vp);

/* build a minimal buffer header */

@@ -1002,7 +1032,6 @@ vnode_pager_input(vnp, m, count, reqpage)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_un.b_addr = (caddr_t) kva;
bp->b_blkno = firstaddr / DEV_BSIZE;
bgetvp(dp, bp);
bp->b_bcount = size;

@@ -1010,6 +1039,29 @@ vnode_pager_input(vnp, m, count, reqpage)

/* do the input */
VOP_STRATEGY(bp);
if( counta) {
for(i=0;i<counta;i++) {
vm_page_deactivate(m[count+i]);
}
pmap_qenter(bpa->b_data, &m[count], counta);
++m[count]->object->paging_in_progress;
VHOLD(vp);
bpa->b_flags = B_BUSY | B_READ | B_CALL | B_ASYNC;
bpa->b_iodone = vnode_pager_iodone;
/* B_PHYS is not set, but it is nice to fill this in */
bpa->b_proc = curproc;
bpa->b_rcred = bpa->b_wcred = bpa->b_proc->p_ucred;
if (bpa->b_rcred != NOCRED)
crhold(bpa->b_rcred);
if (bpa->b_wcred != NOCRED)
crhold(bpa->b_wcred);
bpa->b_blkno = (firstaddr + count * PAGE_SIZE) / DEV_BSIZE;
bgetvp(dp, bpa);
bpa->b_bcount = sizea;
bpa->b_bufsize = counta*PAGE_SIZE;

VOP_STRATEGY(bpa);
}

s = splbio();
/* we definitely need to be at splbio here */

@@ -1025,8 +1077,7 @@ vnode_pager_input(vnp, m, count, reqpage)
if (size != count * PAGE_SIZE)
bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
}
pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE * count);
kmem_free_wakeup(pager_map, kva, mapsize);
pmap_qremove( kva, count);

/*
* free the buffer header back to the swap buffer pool

@@ -1283,16 +1334,6 @@ retryoutput:
return rtvals[0];
}

/*
* get some kva for the output
*/
kva = kmem_alloc_pageable(pager_map, (mapsize = count * PAGE_SIZE));
if (!kva) {
kva = kmem_alloc_pageable(pager_map, (mapsize = PAGE_SIZE));
count = 1;
if (!kva)
return rtvals[0];
}
for (i = 0; i < count; i++) {
foff = m[i]->offset + paging_offset;
if (foff >= vnp->vnp_size) {

@@ -1333,16 +1374,14 @@ retryoutput:
if (dp->v_type == VBLK || dp->v_type == VCHR)
size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

bp = getpbuf();
kva = (vm_offset_t)bp->b_data;
/*
* and map the pages to be read into the kva
*/
for (i = 0; i < count; i++)
pmap_kenter(kva + PAGE_SIZE * i, VM_PAGE_TO_PHYS(m[i]));
pmap_update();
/*
pmap_qenter(kva, m, count);
printf("vnode: writing foff: %d, devoff: %d, size: %d\n",
foff, reqaddr, size);
*/

/*
* next invalidate the incore vfs_bio data

@@ -1370,7 +1409,6 @@ retryoutput:
}

bp = getpbuf();
VHOLD(vp);
/* build a minimal buffer header */
bp->b_flags = B_BUSY | B_WRITE | B_CALL;

@@ -1383,7 +1421,6 @@ retryoutput:
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_un.b_addr = (caddr_t) kva;
bp->b_blkno = reqaddr / DEV_BSIZE;
bgetvp(dp, bp);
++dp->v_numoutput;

@@ -1410,8 +1447,7 @@ retryoutput:
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;

pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE * count);
kmem_free_wakeup(pager_map, kva, mapsize);
pmap_qremove( kva, count);

/*
* free the buffer header back to the swap buffer pool