Fixed two potentially serious classes of bugs:

1) The vnode pager wasn't properly tracking the file size because
   "size" was page-rounded in some cases and not in others.
   This sometimes resulted in corrupted files. First noticed by
   Terry Lambert.
   Fixed by changing the "size" pager_alloc parameter to a 64-bit
   byte value (as opposed to a 32-bit page index) and changing the
   pagers and their callers to deal with this properly.
2) Fixed a bogus type cast in round_page() and trunc_page() that
   caused some 64-bit offsets and sizes to be scrambled. Removing
   the cast required adding casts at a few dozen callers. Nearby
   macros may have similar bogus casts, but a quick check indicated
   that those are okay. (Both changes are illustrated in the
   sketches after this list.)
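To make item 1 concrete: in the hunks below, vnode_pager_alloc() and the
vm_pager_allocate() callers (see the vfs_subr.c and vm_mmap.c changes) stop
converting the byte size to a page index at the call site and hand the pager
the 64-bit byte size directly. The stand-alone sketch below only illustrates
why the old page-index interface could not preserve an exact file size; the
typedefs, OFF_TO_IDX(), and round_page() here are simplified stand-ins for
the kernel definitions, not the real headers.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE - 1)

typedef uint64_t vm_ooffset_t;  /* 64-bit byte offset/size (stand-in) */
typedef uint64_t vm_pindex_t;   /* page index (stand-in) */

#define OFF_TO_IDX(off) ((vm_pindex_t)((off) >> PAGE_SHIFT))
#define round_page(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
        vm_ooffset_t va_size = 10000;   /* exact file size in bytes */

        /*
         * Old interface: the caller converted the size to a page index
         * (sometimes page-rounding first, sometimes not), so the pager
         * could only reconstruct a page-multiple byte size from it.
         */
        vm_pindex_t  old_arg      = OFF_TO_IDX(round_page(va_size));   /* 3 */
        vm_ooffset_t old_vnp_size = (vm_ooffset_t)old_arg * PAGE_SIZE; /* 12288 */

        /*
         * New interface: the pager receives the byte size itself, keeps
         * it exactly for vnp_size, and does the page rounding in one
         * place when sizing the object.
         */
        vm_ooffset_t new_vnp_size = va_size;                            /* 10000 */
        vm_pindex_t  new_obj_size = OFF_TO_IDX(round_page(va_size));    /* 3 */

        printf("old: pager arg %llu pages -> vnp_size %llu bytes\n",
            (unsigned long long)old_arg, (unsigned long long)old_vnp_size);
        printf("new: vnp_size %llu bytes, object %llu pages\n",
            (unsigned long long)new_vnp_size, (unsigned long long)new_obj_size);
        return (0);
}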
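For item 2: the i386 param.h hunk below removes the (unsigned) casts from
trunc_page() and round_page(), which is also why the rest of the diff adds
explicit (vm_offset_t) casts at pointer-valued callers, since the macros no
longer coerce their argument. The host-side sketch below (the typedef and the
demo offset are illustrative assumptions) shows how the old cast silently
dropped the upper 32 bits of a 64-bit offset.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

typedef uint64_t vm_ooffset_t;  /* 64-bit byte offset (stand-in) */

/* Old i386 macros: the (unsigned) cast truncates to 32 bits. */
#define old_trunc_page(x) ((unsigned)(x) & ~PAGE_MASK)
#define old_round_page(x) ((((unsigned)(x)) + PAGE_MASK) & ~PAGE_MASK)

/* New macros: no cast; pointer callers now cast to vm_offset_t themselves. */
#define trunc_page(x)     ((x) & ~PAGE_MASK)
#define round_page(x)     (((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
        vm_ooffset_t off = (5ULL << 32) + 12345;  /* an offset past 4GB */

        /* Old macro: prints 0x3000, the high 32 bits are gone. */
        printf("old trunc_page: 0x%llx\n",
            (unsigned long long)old_trunc_page(off));
        /* New macros: prints 0x500003000 and 0x500004000 as expected. */
        printf("new trunc_page: 0x%llx\n",
            (unsigned long long)trunc_page(off));
        printf("new round_page: 0x%llx\n",
            (unsigned long long)round_page(off));
        return (0);
}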
David Greenman 1998-10-13 08:24:45 +00:00
parent d74a7fd03e
commit 6cde7a165f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=40286
27 changed files with 102 additions and 103 deletions

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: busdma_machdep.c,v 1.9 1998/09/29 09:06:00 bde Exp $
* $Id: busdma_machdep.c,v 1.10 1998/10/07 03:38:14 gibbs Exp $
*/
#include <sys/param.h>
@ -138,8 +138,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->parent = parent;
newtag->boundary = boundary;
newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
newtag->filter = filter;
newtag->filterarg = filterarg;
newtag->maxsize = maxsize;
@ -395,7 +395,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
* Count the number of bounce pages
* needed in order to complete this transfer
*/
vaddr = trunc_page(buf);
vaddr = trunc_page((vm_offset_t)buf);
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {

View File

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.110 1998/09/25 17:34:48 peter Exp $
* $Id: vm_machdep.c,v 1.111 1998/09/28 03:34:39 tegge Exp $
*/
#include "npx.h"
@ -341,7 +341,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
/*
@ -376,7 +376,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
for (addr = (caddr_t)trunc_page(bp->b_data);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
pa = trunc_page(pmap_kextract((vm_offset_t) addr));

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
* $Id: isa.c,v 1.113 1998/07/19 04:22:55 bde Exp $
* $Id: isa.c,v 1.114 1998/10/12 13:12:45 bde Exp $
*/
/*
@ -900,7 +900,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
endva = (vm_offset_t)round_page(va + length);
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
#define ISARAM_END RAM_END

View File

@ -25,7 +25,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: scsi_da.c,v 1.8 1998/10/08 05:46:38 ken Exp $
* $Id: scsi_da.c,v 1.9 1998/10/12 17:16:47 ken Exp $
*/
#include "opt_hw_wdog.h"
@ -621,7 +621,7 @@ dadump(dev_t dev)
if (is_physical_memory((vm_offset_t)addr)) {
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(addr), VM_PROT_READ, TRUE);
trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE);
} else {
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(0), VM_PROT_READ, TRUE);

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: busdma_machdep.c,v 1.9 1998/09/29 09:06:00 bde Exp $
* $Id: busdma_machdep.c,v 1.10 1998/10/07 03:38:14 gibbs Exp $
*/
#include <sys/param.h>
@ -138,8 +138,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->parent = parent;
newtag->boundary = boundary;
newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
newtag->filter = filter;
newtag->filterarg = filterarg;
newtag->maxsize = maxsize;
@ -395,7 +395,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
* Count the number of bounce pages
* needed in order to complete this transfer
*/
vaddr = trunc_page(buf);
vaddr = trunc_page((vm_offset_t)buf);
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {

View File

@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.110 1998/09/25 17:34:48 peter Exp $
* $Id: vm_machdep.c,v 1.111 1998/09/28 03:34:39 tegge Exp $
*/
#include "npx.h"
@ -341,7 +341,7 @@ vmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, v += PAGE_SIZE) {
/*
@ -376,7 +376,7 @@ vunmapbuf(bp)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
for (addr = (caddr_t)trunc_page(bp->b_data);
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE) {
pa = trunc_page(pmap_kextract((vm_offset_t) addr));

View File

@ -26,7 +26,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: imgact_coff.c,v 1.30 1998/02/11 01:46:47 eivind Exp $
* $Id: imgact_coff.c,v 1.31 1998/08/16 01:21:49 bde Exp $
*/
#include <sys/param.h>
@ -70,7 +70,7 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
size_t copy_len;
map_offset = trunc_page(offset);
map_addr = trunc_page(vmaddr);
map_addr = trunc_page((vm_offset_t)vmaddr);
if (memsz > filsz) {
/*
@ -116,8 +116,8 @@ load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
*/
copy_len = (offset + filsz) - trunc_page(offset + filsz);
map_addr = trunc_page(vmaddr + filsz);
map_len = round_page(vmaddr + memsz) - map_addr;
map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08lx,0x%x, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, map_addr, map_len));
@ -451,7 +451,7 @@ exec_coff_imgact(imgp)
vmspace->vm_taddr = (caddr_t)(void *)(uintptr_t)text_address;
vmspace->vm_daddr = (caddr_t)(void *)(uintptr_t)data_address;
hole = (caddr_t)trunc_page(vmspace->vm_daddr) + ctob(vmspace->vm_dsize);
hole = (caddr_t)trunc_page((vm_offset_t)vmspace->vm_daddr) + ctob(vmspace->vm_dsize);
DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08lx, PAGE_SIZE, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)param.h 5.8 (Berkeley) 6/28/91
* $Id: param.h,v 1.45 1998/08/31 08:41:40 kato Exp $
* $Id: param.h,v 1.46 1998/09/09 01:21:25 jdp Exp $
*/
#ifndef _MACHINE_PARAM_H_
@ -140,8 +140,8 @@
/*
* Mach derived conversion macros
*/
#define trunc_page(x) ((unsigned)(x) & ~PAGE_MASK)
#define round_page(x) ((((unsigned)(x)) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page(x) ((x) & ~PAGE_MASK)
#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_4mpage(x) ((unsigned)(x) & ~PDRMASK)
#define round_4mpage(x) ((((unsigned)(x)) + PDRMASK) & ~PDRMASK)

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)isa.c 7.2 (Berkeley) 5/13/91
* $Id: isa.c,v 1.113 1998/07/19 04:22:55 bde Exp $
* $Id: isa.c,v 1.114 1998/10/12 13:12:45 bde Exp $
*/
/*
@ -900,7 +900,7 @@ isa_dmarangecheck(caddr_t va, u_int length, int chan)
vm_offset_t phys, priorpage = 0, endva;
u_int dma_pgmsk = (chan & 4) ? ~(128*1024-1) : ~(64*1024-1);
endva = (vm_offset_t)round_page(va + length);
endva = (vm_offset_t)round_page((vm_offset_t)va + length);
for (; va < (caddr_t) endva ; va += PAGE_SIZE) {
phys = trunc_page(pmap_extract(pmap_kernel(), (vm_offset_t)va));
#define ISARAM_END RAM_END

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)wd.c 7.2 (Berkeley) 5/9/91
* $Id: wd.c,v 1.175 1998/09/14 19:56:39 sos Exp $
* $Id: wd.c,v 1.176 1998/09/15 08:15:30 gibbs Exp $
*/
/* TODO:
@ -2181,7 +2181,7 @@ out:
while (blkcnt != 0) {
if (is_physical_memory((vm_offset_t)addr))
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(addr), VM_PROT_READ, TRUE);
trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE);
else
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(0), VM_PROT_READ, TRUE);

View File

@ -26,7 +26,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: imgact_elf.c,v 1.36 1998/10/03 04:12:09 jdp Exp $
* $Id: imgact_elf.c,v 1.37 1998/10/11 19:22:07 jdp Exp $
*/
#include "opt_rlimit.h"
@ -193,7 +193,7 @@ elf_load_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
unsigned char *data_buf = 0;
size_t copy_len;
map_addr = trunc_page(vmaddr);
map_addr = trunc_page((vm_offset_t)vmaddr);
if (memsz > filsz)
map_len = trunc_page(offset+filsz) - trunc_page(offset);
@ -219,8 +219,8 @@ elf_load_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
* bit into it. The remaining space should be .bss...
*/
copy_len = (offset + filsz) - trunc_page(offset + filsz);
map_addr = trunc_page(vmaddr + filsz);
map_len = round_page(vmaddr + memsz) - map_addr;
map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
if (map_len != 0) {
if (error = vm_map_find(&vmspace->vm_map, NULL, 0,

View File

@ -16,7 +16,7 @@
* 4. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: sys_pipe.c,v 1.41 1998/03/28 10:33:07 bde Exp $
* $Id: sys_pipe.c,v 1.42 1998/06/07 17:11:39 dfr Exp $
*/
/*
@ -490,8 +490,8 @@ pipe_build_write_buffer(wpipe, uio)
if (size > wpipe->pipe_buffer.size)
size = wpipe->pipe_buffer.size;
endaddr = round_page(uio->uio_iov->iov_base + size);
for(i = 0, addr = trunc_page(uio->uio_iov->iov_base);
endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
for(i = 0, addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
addr < endaddr;
addr += PAGE_SIZE, i+=1) {

View File

@ -1,4 +1,4 @@
/* $Id: sysv_shm.c,v 1.37 1998/05/04 17:12:47 dyson Exp $ */
/* $Id: sysv_shm.c,v 1.38 1998/08/24 08:39:38 dfr Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
@ -265,7 +265,7 @@ shmat(p, uap)
return EINVAL;
} else {
/* This is just a hint to vm_map_find() about where to put it. */
attach_va = round_page(p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
}
shm_handle = shmseg->shm_internal;
@ -501,8 +501,7 @@ shmget_allocate_segment(p, uap, mode)
* to.
*/
shm_handle->shm_object =
vm_pager_allocate(OBJT_SWAP, 0, OFF_TO_IDX(size),
VM_PROT_DEFAULT, 0);
vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.177 1998/09/25 17:34:49 peter Exp $
* $Id: vfs_bio.c,v 1.178 1998/09/26 00:12:35 dillon Exp $
*/
/*
@ -668,7 +668,7 @@ brelse(struct buf * bp)
}
if ((bp->b_flags & B_INVAL) == 0) {
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
}
if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
@ -1721,7 +1721,7 @@ allocbuf(struct buf * bp, int size)
bp->b_pages[i] = NULL;
vm_page_unwire(m);
}
pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
(desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
bp->b_npages = desiredpages;
}
@ -1827,7 +1827,7 @@ allocbuf(struct buf * bp, int size)
if (bp->b_validend == 0)
bp->b_flags &= ~B_CACHE;
}
bp->b_data = (caddr_t) trunc_page(bp->b_data);
bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
bp->b_npages = curbpnpages;
pmap_qenter((vm_offset_t) bp->b_data,
bp->b_pages, bp->b_npages);
@ -1975,7 +1975,7 @@ biodone(register struct buf * bp)
continue;
}
bp->b_pages[i] = m;
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
#if defined(VFS_BIO_DEBUG)
if (OFF_TO_IDX(foff) != m->pindex) {
@ -2123,7 +2123,7 @@ vfs_unbusy_pages(struct buf * bp)
}
#endif
bp->b_pages[i] = m;
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
vm_object_pip_subtract(obj, 1);
vm_page_flag_clear(m, PG_ZERO);
@ -2260,7 +2260,7 @@ retry:
else if (bp->b_bcount >= PAGE_SIZE) {
if (m->valid && (bp->b_flags & B_CACHE) == 0) {
bp->b_pages[i] = bogus_page;
pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
}
}
@ -2349,7 +2349,7 @@ vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
to = round_page(to);
from = round_page(from);
index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
@ -2382,7 +2382,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
from = round_page(from);
to = round_page(to);
newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
p = bp->b_pages[index];

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.163 1998/09/14 19:56:40 sos Exp $
* $Id: vfs_subr.c,v 1.164 1998/10/12 20:14:09 dt Exp $
*/
/*
@ -2539,15 +2539,14 @@ retry:
if (vp->v_type == VREG) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
object = vnode_pager_alloc(vp,
OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (major(vp->v_rdev) < nblkdev) {
/*
* This simply allocates the biggest object possible
* for a VBLK vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
object = vnode_pager_alloc(vp, INT_MAX, 0, 0);
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
}
object->ref_count--;
vp->v_usecount--;

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.163 1998/09/14 19:56:40 sos Exp $
* $Id: vfs_subr.c,v 1.164 1998/10/12 20:14:09 dt Exp $
*/
/*
@ -2539,15 +2539,14 @@ retry:
if (vp->v_type == VREG) {
if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
goto retn;
object = vnode_pager_alloc(vp,
OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
} else if (major(vp->v_rdev) < nblkdev) {
/*
* This simply allocates the biggest object possible
* for a VBLK vnode. This should be fixed, but doesn't
* cause any problems (yet).
*/
object = vnode_pager_alloc(vp, INT_MAX, 0, 0);
object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
}
object->ref_count--;
vp->v_usecount--;

View File

@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: default_pager.c,v 1.14 1998/02/04 22:33:39 eivind Exp $
* $Id: default_pager.c,v 1.15 1998/02/06 12:14:20 eivind Exp $
*/
#include <sys/param.h>
@ -44,7 +44,7 @@
#include <vm/default_pager.h>
#include <vm/swap_pager.h>
static vm_object_t default_pager_alloc __P((void *, vm_size_t, vm_prot_t,
static vm_object_t default_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t));
static void default_pager_dealloc __P((vm_object_t));
static int default_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
@ -69,13 +69,13 @@ struct pagerops defaultpagerops = {
* no_pager_alloc just returns an initialized object.
*/
static vm_object_t
default_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
{
if (handle != NULL)
panic("default_pager_alloc: handle specified");
return vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(offset) + size);
return vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(offset + size)));
}
static void

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.30 1998/02/06 12:14:20 eivind Exp $
* $Id: device_pager.c,v 1.31 1998/07/15 02:32:35 bde Exp $
*/
#include <sys/param.h>
@ -52,7 +52,7 @@
#include <vm/vm_pager.h>
static void dev_pager_init __P((void));
static vm_object_t dev_pager_alloc __P((void *, vm_size_t, vm_prot_t,
static vm_object_t dev_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t));
static void dev_pager_dealloc __P((vm_object_t));
static int dev_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
@ -90,7 +90,7 @@ dev_pager_init()
}
static vm_object_t
dev_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, vm_ooffset_t foff)
dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
{
dev_t dev;
d_mmap_t *mapfunc;
@ -113,13 +113,15 @@ dev_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, vm_ooffset_t foff)
if (foff & PAGE_MASK)
return (NULL);
size = round_page(size);
/*
* Check that the specified range of the device allows the desired
* protection.
*
* XXX assumes VM_PROT_* == PROT_*
*/
npages = size;
npages = OFF_TO_IDX(size);
for (off = foff; npages--; off += PAGE_SIZE)
if ((*mapfunc) (dev, off, (int) prot) == -1)
return (NULL);
@ -143,7 +145,7 @@ dev_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, vm_ooffset_t foff)
* Allocate object and associate it with the pager.
*/
object = vm_object_allocate(OBJT_DEVICE,
OFF_TO_IDX(foff) + size);
OFF_TO_IDX(foff + size));
object->handle = handle;
TAILQ_INIT(&object->un_pager.devp.devp_pglist);
TAILQ_INSERT_TAIL(&dev_pager_object_list, object, pager_object_list);
@ -152,8 +154,8 @@ dev_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, vm_ooffset_t foff)
* Gain a reference to the object.
*/
vm_object_reference(object);
if (OFF_TO_IDX(foff) + size > object->size)
object->size = OFF_TO_IDX(foff) + size;
if (OFF_TO_IDX(foff + size) > object->size)
object->size = OFF_TO_IDX(foff + size);
}
dev_pager_alloc_lock = 0;

View File

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.100 1998/08/24 08:39:37 dfr Exp $
* $Id: swap_pager.c,v 1.101 1998/09/04 08:06:56 dfr Exp $
*/
/*
@ -136,7 +136,7 @@ static struct pagerlst *swp_qs[] = {
* pagerops for OBJT_SWAP - "swap pager".
*/
static vm_object_t
swap_pager_alloc __P((void *handle, vm_size_t size,
swap_pager_alloc __P((void *handle, vm_ooffset_t size,
vm_prot_t prot, vm_ooffset_t offset));
static void swap_pager_dealloc __P((vm_object_t object));
static boolean_t
@ -295,7 +295,7 @@ swap_pager_swp_alloc(object, wait)
* we should not wait for memory as it could resulting in deadlock.
*/
static vm_object_t
swap_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
{
vm_object_t object;
@ -317,13 +317,13 @@ swap_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
* rip support of "named anonymous regions" out altogether.
*/
object = vm_object_allocate(OBJT_SWAP,
OFF_TO_IDX(offset + PAGE_MASK) + size);
OFF_TO_IDX(offset + PAGE_MASK + size));
object->handle = handle;
(void) swap_pager_swp_alloc(object, M_WAITOK);
}
} else {
object = vm_object_allocate(OBJT_SWAP,
OFF_TO_IDX(offset + PAGE_MASK) + size);
OFF_TO_IDX(offset + PAGE_MASK + size));
(void) swap_pager_swp_alloc(object, M_WAITOK);
}

View File

@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.75 1998/03/04 10:27:00 dufault Exp $
* $Id: vm_glue.c,v 1.76 1998/09/29 17:33:59 abial Exp $
*/
#include "opt_rlimit.h"
@ -125,8 +125,8 @@ kernacc(addr, len, rw)
vm_offset_t saddr, eaddr;
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
saddr = trunc_page(addr);
eaddr = round_page(addr + len);
saddr = trunc_page((vm_offset_t)addr);
eaddr = round_page((vm_offset_t)addr + len);
vm_map_lock_read(kernel_map);
rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
vm_map_unlock_read(kernel_map);
@ -164,7 +164,7 @@ useracc(addr, len, rw)
*/
save_hint = map->hint;
rv = vm_map_check_protection(map,
trunc_page(addr), round_page(addr + len), prot);
trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
map->hint = save_hint;
vm_map_unlock_read(map);
@ -176,8 +176,8 @@ vslock(addr, len)
caddr_t addr;
u_int len;
{
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr + len), FALSE);
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), FALSE);
}
void
@ -189,8 +189,8 @@ vsunlock(addr, len, dirtied)
#ifdef lint
dirtied++;
#endif /* lint */
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
round_page(addr + len), TRUE);
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
round_page((vm_offset_t)addr + len), TRUE);
}
/*

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.135 1998/09/04 08:06:57 dfr Exp $
* $Id: vm_map.c,v 1.136 1998/10/01 20:46:41 jdp Exp $
*/
/*
@ -1965,7 +1965,7 @@ vm_map_split(entry)
size = offidxend - offidxstart;
new_object = vm_pager_allocate(orig_object->type,
NULL, size, VM_PROT_ALL, 0LL);
NULL, IDX_TO_OFF(size), VM_PROT_ALL, 0LL);
if (new_object == NULL)
return;

View File

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
* $Id: vm_mmap.c,v 1.82 1998/08/24 08:39:37 dfr Exp $
* $Id: vm_mmap.c,v 1.83 1998/09/04 08:06:57 dfr Exp $
*/
/*
@ -220,8 +220,8 @@ mmap(p, uap)
* There should really be a pmap call to determine a reasonable
* location.
*/
else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);
else if (addr < round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ))
addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ);
if (flags & MAP_ANON) {
/*
@ -987,7 +987,7 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
object = NULL;
} else {
object = vm_pager_allocate(type,
handle, OFF_TO_IDX(objsize), prot, foff);
handle, objsize, prot, foff);
if (object == NULL)
return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
}

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.36 1998/03/07 21:37:21 dyson Exp $
* $Id: vm_pager.c,v 1.37 1998/03/16 01:56:01 dyson Exp $
*/
/*
@ -91,7 +91,7 @@ extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
static int dead_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static vm_object_t dead_pager_alloc __P((void *, vm_size_t, vm_prot_t,
static vm_object_t dead_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
vm_ooffset_t));
static int dead_pager_putpages __P((vm_object_t, vm_page_t *, int, int, int *));
static boolean_t dead_pager_haspage __P((vm_object_t, vm_pindex_t, int *, int *));
@ -110,7 +110,7 @@ dead_pager_getpages(obj, ma, count, req)
vm_object_t
dead_pager_alloc(handle, size, prot, off)
void *handle;
vm_size_t size;
vm_ooffset_t size;
vm_prot_t prot;
vm_ooffset_t off;
{
@ -227,7 +227,7 @@ vm_pager_bufferinit()
* need to perform page-level validation (e.g. the device pager).
*/
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_size_t size, vm_prot_t prot,
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t off)
{
struct pagerops *ops;

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
* $Id: vm_pager.h,v 1.15 1998/02/03 22:19:35 bde Exp $
* $Id: vm_pager.h,v 1.16 1998/03/07 21:37:27 dyson Exp $
*/
/*
@ -52,7 +52,7 @@ TAILQ_HEAD(pagerlst, vm_object);
struct pagerops {
void (*pgo_init) __P((void)); /* Initialize pager. */
vm_object_t (*pgo_alloc) __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t)); /* Allocate pager. */
vm_object_t (*pgo_alloc) __P((void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t)); /* Allocate pager. */
void (*pgo_dealloc) __P((vm_object_t)); /* Disassociate. */
int (*pgo_getpages) __P((vm_object_t, vm_page_t *, int, int)); /* Get (read) page. */
int (*pgo_putpages) __P((vm_object_t, vm_page_t *, int, int, int *)); /* Put (write) page. */
@ -88,7 +88,7 @@ MALLOC_DECLARE(M_VMPGDATA);
extern vm_map_t pager_map;
extern int pager_map_size;
vm_object_t vm_pager_allocate __P((objtype_t, void *, vm_size_t, vm_prot_t, vm_ooffset_t));
vm_object_t vm_pager_allocate __P((objtype_t, void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t));
void vm_pager_bufferinit __P((void));
void vm_pager_deallocate __P((vm_object_t));
int vm_pager_get_pages __P((vm_object_t, vm_page_t *, int, int));

View File

@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
*
* @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
* $Id: vm_unix.c,v 1.15 1997/08/02 14:33:27 bde Exp $
* $Id: vm_unix.c,v 1.16 1997/11/06 19:29:57 phk Exp $
*/
/*
@ -74,7 +74,7 @@ obreak(p, uap)
int rv;
base = round_page((vm_offset_t) vm->vm_daddr);
new = round_page(uap->nsize);
new = round_page((vm_offset_t)uap->nsize);
if (new > base) {
if ((new - base) > (unsigned) p->p_rlimit[RLIMIT_DATA].rlim_cur)
return ENOMEM;

View File

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.98 1998/09/05 15:17:34 bde Exp $
* $Id: vnode_pager.c,v 1.99 1998/09/28 23:58:10 rvb Exp $
*/
/*
@ -94,7 +94,7 @@ struct pagerops vnodepagerops = {
* Handle is a vnode pointer.
*/
vm_object_t
vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t offset)
{
vm_object_t object;
@ -134,10 +134,10 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
/*
* And an object of the appropriate size
*/
object = vm_object_allocate(OBJT_VNODE, size);
object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
object->flags = 0;
object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;
object->un_pager.vnp.vnp_size = size;
object->handle = handle;
vp->v_object = object;

View File

@ -36,14 +36,14 @@
* SUCH DAMAGE.
*
* @(#)vnode_pager.h 8.1 (Berkeley) 6/11/93
* $Id: vnode_pager.h,v 1.10 1997/02/22 09:48:43 peter Exp $
* $Id: vnode_pager.h,v 1.11 1998/02/26 06:39:59 msmith Exp $
*/
#ifndef _VNODE_PAGER_
#define _VNODE_PAGER_ 1
#ifdef KERNEL
vm_object_t vnode_pager_alloc __P((void *, vm_size_t, vm_prot_t, vm_ooffset_t));
vm_object_t vnode_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t));
void vnode_pager_freepage __P((vm_page_t m));
struct vnode *vnode_pager_lock __P((vm_object_t));