From bd1e3a0f89e65b62b704ca57b7d05611257c4815 Mon Sep 17 00:00:00 2001
From: Peter Wemm
Date: Wed, 27 Feb 2002 02:14:58 +0000
Subject: [PATCH] Jake further reduced IPI shootdowns on sparc64 in loops by
 using ranged shootdowns in a couple of key places. Do the same for i386.
 This also hides some physical addresses from higher levels and has it use
 the generic vm_page_t's instead. This will help for PAE down the road.

Obtained from:	jake (MI code, suggestions for MD part)
---
 sys/amd64/amd64/pmap.c | 86 ++++++++++++++----------------------------
 sys/i386/i386/pmap.c   | 86 ++++++++++++++----------------------------
 sys/kern/kern_exec.c   |  4 +-
 sys/kern/sys_process.c |  4 +-
 sys/kern/vfs_bio.c     |  4 +-
 sys/vm/vm_pager.c      |  4 +-
 sys/vm/vm_zone.c       |  2 +-
 7 files changed, 65 insertions(+), 125 deletions(-)
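The mechanical theme of the patch: every converted loop used to write a PTE and then invalidate that single page, paying one TLB-shootdown IPI round per iteration on SMP; the loops now batch the PTE writes and finish with a single ranged invalidation (pmap_qenter()/pmap_qremove() on top of invlpg_range()). The following userland sketch only models the cost difference; invlpg_one(), invlpg_range() and the shootdown counter are hypothetical stand-ins for the kernel primitives, assuming each invalidation call costs one IPI round trip:

```c
#include <stdio.h>

#define PAGE_SIZE 4096
#define NPAGES    16

static int shootdowns;			/* models IPI round trips */

static void
invlpg_one(unsigned long va)
{
	(void)va;
	shootdowns++;
}

static void
invlpg_range(unsigned long sva, unsigned long eva)
{
	(void)sva;
	(void)eva;
	shootdowns++;
}

int
main(void)
{
	unsigned long va, sva = 0x1000000UL;
	int i;

	/* Old pattern: write a PTE, then invalidate, once per page. */
	shootdowns = 0;
	for (i = 0, va = sva; i < NPAGES; i++, va += PAGE_SIZE)
		invlpg_one(va);			/* one IPI round per page */
	printf("per-page: %d shootdowns\n", shootdowns);

	/* New pattern: write all PTEs first, invalidate the range once. */
	shootdowns = 0;
	invlpg_range(sva, sva + NPAGES * PAGE_SIZE);	/* one IPI round */
	printf("ranged:   %d shootdowns\n", shootdowns);
	return (0);
}
```

The PTE writes are the same either way; only the number of invalidation rounds drops from N to 1.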
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index ba3ee22c27d9..a2ece0a0eb77 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -727,11 +727,9 @@ PMAP_INLINE void
 pmap_kenter(vm_offset_t va, vm_offset_t pa)
 {
 	pt_entry_t *pte;
-	pt_entry_t npte;
 
-	npte = pa | PG_RW | PG_V | pgeflag;
 	pte = vtopte(va);
-	*pte = npte;
+	*pte = pa | PG_RW | PG_V | pgeflag;
 	invlpg(va);
 }
 
@@ -741,7 +739,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
 PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 
 	pte = vtopte(va);
 	*pte = 0;
@@ -764,12 +762,10 @@ vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
 	vm_offset_t va, sva;
-	pt_entry_t *pte;
 
 	va = sva = *virt;
 	while (start < end) {
-		pte = vtopte(va);
-		*pte = start | PG_RW | PG_V | pgeflag;
+		pmap_kenter(va, start);
 		va += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
@@ -791,14 +787,12 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va, end_va;
-	pt_entry_t *pte;
 
 	va = sva;
 	end_va = va + count * PAGE_SIZE;
 
 	while (va < end_va) {
-		pte = vtopte(va);
-		*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
+		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
@@ -812,15 +806,13 @@ pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 void
 pmap_qremove(vm_offset_t sva, int count)
 {
-	pt_entry_t *pte;
 	vm_offset_t va, end_va;
 
 	va = sva;
 	end_va = va + count * PAGE_SIZE;
 
 	while (va < end_va) {
-		pte = vtopte(va);
-		*pte = 0;
+		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
 	invlpg_range(sva, end_va);
@@ -845,10 +837,10 @@ void
 pmap_new_proc(struct proc *p)
 {
 	int i;
+	vm_page_t ma[UAREA_PAGES];
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
-	pt_entry_t *ptek, oldpte;
 
 	/*
 	 * allocate object for the upages
@@ -868,13 +860,12 @@ pmap_new_proc(struct proc *p)
 		p->p_uarea = (struct user *)up;
 	}
 
-	ptek = vtopte(up);
-
 	for (i = 0; i < UAREA_PAGES; i++) {
 		/*
 		 * Get a kernel stack page
 		 */
 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+		ma[i] = m;
 
 		/*
 		 * Wire the page
@@ -882,19 +873,12 @@ pmap_new_proc(struct proc *p)
 		m->wire_count++;
 		cnt.v_wire_count++;
 
-		oldpte = *(ptek + i);
-		/*
-		 * Enter the page into the kernel address space.
-		 */
-		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
-		if (oldpte)
-			invlpg(up + i * PAGE_SIZE);
-
 		vm_page_wakeup(m);
 		vm_page_flag_clear(m, PG_ZERO);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 		m->valid = VM_PAGE_BITS_ALL;
 	}
+	pmap_qenter(up, ma, UAREA_PAGES);
 }
 
 /*
@@ -909,18 +893,15 @@ pmap_dispose_proc(p)
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
-	pt_entry_t *ptek;
 
 	upobj = p->p_upages_obj;
 	up = (vm_offset_t)p->p_uarea;
-	ptek = vtopte(up);
+	pmap_qremove(up, UAREA_PAGES);
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_lookup(upobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_proc: upage already missing?");
 		vm_page_busy(m);
-		*(ptek + i) = 0;
-		invlpg(up + i * PAGE_SIZE);
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
 	}
@@ -940,13 +921,13 @@ pmap_swapout_proc(p)
 
 	upobj = p->p_upages_obj;
 	up = (vm_offset_t)p->p_uarea;
+	pmap_qremove(up, UAREA_PAGES);
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_lookup(upobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_proc: upage already missing?");
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
-		pmap_kremove(up + i * PAGE_SIZE);
 	}
 }
 
@@ -958,6 +939,7 @@ pmap_swapin_proc(p)
 	struct proc *p;
 {
 	int i, rv;
+	vm_page_t ma[UAREA_PAGES];
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
@@ -966,7 +948,6 @@ pmap_swapin_proc(p)
 	up = (vm_offset_t)p->p_uarea;
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
 			if (rv != VM_PAGER_OK)
@@ -974,10 +955,12 @@ pmap_swapin_proc(p)
 			m = vm_page_lookup(upobj, i);
 			m->valid = VM_PAGE_BITS_ALL;
 		}
+		ma[i] = m;
 		vm_page_wire(m);
 		vm_page_wakeup(m);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 	}
+	pmap_qenter(up, ma, UAREA_PAGES);
 }
 
 /*
@@ -989,10 +972,10 @@ void
 pmap_new_thread(struct thread *td)
 {
 	int i;
+	vm_page_t ma[KSTACK_PAGES];
 	vm_object_t ksobj;
 	vm_page_t m;
 	vm_offset_t ks;
-	pt_entry_t *ptek, oldpte;
 
 	/*
 	 * allocate object for the kstack
@@ -1003,40 +986,33 @@ pmap_new_thread(struct thread *td)
 		td->td_kstack_obj = ksobj;
 	}
 
-#ifdef KSTACK_GUARD
 	/* get a kernel virtual address for the kstack for this thread */
 	ks = td->td_kstack;
+#ifdef KSTACK_GUARD
 	if (ks == 0) {
 		ks = kmem_alloc_nofault(kernel_map,
 		    (KSTACK_PAGES + 1) * PAGE_SIZE);
 		if (ks == 0)
 			panic("pmap_new_thread: kstack allocation failed");
+		if (*vtopte(ks) != 0)
+			pmap_qremove(ks, 1);
 		ks += PAGE_SIZE;
 		td->td_kstack = ks;
 	}
-
-	ptek = vtopte(ks - PAGE_SIZE);
-	oldpte = *ptek;
-	*ptek = 0;
-	if (oldpte)
-		invlpg(ks - PAGE_SIZE);
-	ptek++;
 #else
-	/* get a kernel virtual address for the kstack for this thread */
-	ks = td->td_kstack;
 	if (ks == 0) {
 		ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
 		if (ks == 0)
 			panic("pmap_new_thread: kstack allocation failed");
 		td->td_kstack = ks;
 	}
-	ptek = vtopte(ks);
 #endif
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		/*
 		 * Get a kernel stack page
 		 */
 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+		ma[i] = m;
 
 		/*
 		 * Wire the page
@@ -1044,19 +1020,12 @@ pmap_new_thread(struct thread *td)
 		m->wire_count++;
 		cnt.v_wire_count++;
 
-		oldpte = *(ptek + i);
-		/*
-		 * Enter the page into the kernel address space.
-		 */
-		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
-		if (oldpte)
-			invlpg(ks + i * PAGE_SIZE);
-
 		vm_page_wakeup(m);
 		vm_page_flag_clear(m, PG_ZERO);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 		m->valid = VM_PAGE_BITS_ALL;
 	}
+	pmap_qenter(ks, ma, KSTACK_PAGES);
 }
 
 /*
@@ -1071,18 +1040,15 @@ pmap_dispose_thread(td)
 	vm_object_t ksobj;
 	vm_offset_t ks;
 	vm_page_t m;
-	pt_entry_t *ptek;
 
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
-	ptek = vtopte(ks);
+	pmap_qremove(ks, KSTACK_PAGES);
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_thread: kstack already missing?");
 		vm_page_busy(m);
-		*(ptek + i) = 0;
-		invlpg(ks + i * PAGE_SIZE);
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
 	}
@@ -1102,13 +1068,13 @@ pmap_swapout_thread(td)
 
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
+	pmap_qremove(ks, KSTACK_PAGES);
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_thread: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
-		pmap_kremove(ks + i * PAGE_SIZE);
 	}
 }
 
@@ -1120,6 +1086,7 @@ pmap_swapin_thread(td)
 	struct thread *td;
 {
 	int i, rv;
+	vm_page_t ma[KSTACK_PAGES];
 	vm_object_t ksobj;
 	vm_offset_t ks;
 	vm_page_t m;
@@ -1128,7 +1095,6 @@ pmap_swapin_thread(td)
 	ks = td->td_kstack;
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
 			if (rv != VM_PAGER_OK)
@@ -1136,10 +1102,12 @@ pmap_swapin_thread(td)
 			m = vm_page_lookup(ksobj, i);
 			m->valid = VM_PAGE_BITS_ALL;
 		}
+		ma[i] = m;
 		vm_page_wire(m);
 		vm_page_wakeup(m);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 	}
+	pmap_qenter(ks, ma, KSTACK_PAGES);
 }
 
 /***************************************************
@@ -1234,7 +1202,8 @@ pmap_pinit0(pmap)
 {
 	pmap->pm_pdir =
 		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
-	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
+	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t)IdlePTD);
+	invlpg((vm_offset_t)pmap->pm_pdir);
 	pmap->pm_count = 1;
 	pmap->pm_ptphint = NULL;
 	pmap->pm_active = 0;
@@ -1280,7 +1249,7 @@ pmap_pinit(pmap)
 	vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
 	ptdpg->valid = VM_PAGE_BITS_ALL;
 
-	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
+	pmap_qenter((vm_offset_t) pmap->pm_pdir, &ptdpg, 1);
 	if ((ptdpg->flags & PG_ZERO) == 0)
 		bzero(pmap->pm_pdir, PAGE_SIZE);
 
@@ -2369,6 +2338,7 @@ void *
 pmap_kenter_temporary(vm_offset_t pa, int i)
 {
 	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
+	invlpg((vm_offset_t)crashdumpmap + (i * PAGE_SIZE));
 	return ((void *)crashdumpmap);
 }
 
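The pmap_new_proc(), pmap_swapin_proc() and pmap_new_thread() hunks above all share one shape: the allocation loop now only collects pages into the local ma[] array (and wires them), and a single pmap_qenter() after the loop enters every mapping at once. A compilable sketch of that gather-then-map shape; page_t, grab_page(), qenter() and the fake PTE array are simplified stand-ins, not the kernel's vm_page_t or pmap interfaces:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   4096
#define UAREA_PAGES 2

typedef struct page {
	uintptr_t phys;
	int wire_count;
} page_t;

static page_t pool[UAREA_PAGES];
static uintptr_t fake_pte[UAREA_PAGES];

/* Models vm_page_grab(): produce the page backing index 'idx'. */
static page_t *
grab_page(int idx)
{
	pool[idx].phys = (uintptr_t)(idx + 1) * PAGE_SIZE;
	return (&pool[idx]);
}

/* Models pmap_qenter(): enter all PTEs, then one ranged flush. */
static void
qenter(uintptr_t va, page_t **ma, int count)
{
	int i;

	for (i = 0; i < count; i++)
		fake_pte[i] = ma[i]->phys | 1;	/* "PG_V" */
	/* a single invlpg_range(va, va + count * PAGE_SIZE) goes here */
	(void)va;
}

int
main(void)
{
	page_t *ma[UAREA_PAGES];
	int i;

	/* The loop only collects and wires; no mapping yet. */
	for (i = 0; i < UAREA_PAGES; i++) {
		ma[i] = grab_page(i);
		ma[i]->wire_count++;
	}
	/* One batched call replaces per-iteration kenter + invlpg. */
	qenter(0x1000000, ma, UAREA_PAGES);
	for (i = 0; i < UAREA_PAGES; i++)
		printf("pte[%d] = %#lx\n", i, (unsigned long)fake_pte[i]);
	return (0);
}
```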
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index ba3ee22c27d9..a2ece0a0eb77 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -727,11 +727,9 @@ PMAP_INLINE void
 pmap_kenter(vm_offset_t va, vm_offset_t pa)
 {
 	pt_entry_t *pte;
-	pt_entry_t npte;
 
-	npte = pa | PG_RW | PG_V | pgeflag;
 	pte = vtopte(va);
-	*pte = npte;
+	*pte = pa | PG_RW | PG_V | pgeflag;
 	invlpg(va);
 }
 
@@ -741,7 +739,7 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
 PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
-	register pt_entry_t *pte;
+	pt_entry_t *pte;
 
 	pte = vtopte(va);
 	*pte = 0;
@@ -764,12 +762,10 @@ vm_offset_t
 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 {
 	vm_offset_t va, sva;
-	pt_entry_t *pte;
 
 	va = sva = *virt;
 	while (start < end) {
-		pte = vtopte(va);
-		*pte = start | PG_RW | PG_V | pgeflag;
+		pmap_kenter(va, start);
 		va += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
@@ -791,14 +787,12 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va, end_va;
-	pt_entry_t *pte;
 
 	va = sva;
 	end_va = va + count * PAGE_SIZE;
 
 	while (va < end_va) {
-		pte = vtopte(va);
-		*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
+		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
@@ -812,15 +806,13 @@ pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 void
 pmap_qremove(vm_offset_t sva, int count)
 {
-	pt_entry_t *pte;
 	vm_offset_t va, end_va;
 
 	va = sva;
 	end_va = va + count * PAGE_SIZE;
 
 	while (va < end_va) {
-		pte = vtopte(va);
-		*pte = 0;
+		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
 	invlpg_range(sva, end_va);
@@ -845,10 +837,10 @@ void
 pmap_new_proc(struct proc *p)
 {
 	int i;
+	vm_page_t ma[UAREA_PAGES];
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
-	pt_entry_t *ptek, oldpte;
 
 	/*
 	 * allocate object for the upages
@@ -868,13 +860,12 @@ pmap_new_proc(struct proc *p)
 		p->p_uarea = (struct user *)up;
 	}
 
-	ptek = vtopte(up);
-
 	for (i = 0; i < UAREA_PAGES; i++) {
 		/*
 		 * Get a kernel stack page
 		 */
 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+		ma[i] = m;
 
 		/*
 		 * Wire the page
@@ -882,19 +873,12 @@ pmap_new_proc(struct proc *p)
 		m->wire_count++;
 		cnt.v_wire_count++;
 
-		oldpte = *(ptek + i);
-		/*
-		 * Enter the page into the kernel address space.
-		 */
-		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
-		if (oldpte)
-			invlpg(up + i * PAGE_SIZE);
-
 		vm_page_wakeup(m);
 		vm_page_flag_clear(m, PG_ZERO);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 		m->valid = VM_PAGE_BITS_ALL;
 	}
+	pmap_qenter(up, ma, UAREA_PAGES);
 }
 
 /*
@@ -909,18 +893,15 @@ pmap_dispose_proc(p)
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
-	pt_entry_t *ptek;
 
 	upobj = p->p_upages_obj;
 	up = (vm_offset_t)p->p_uarea;
-	ptek = vtopte(up);
+	pmap_qremove(up, UAREA_PAGES);
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_lookup(upobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_proc: upage already missing?");
 		vm_page_busy(m);
-		*(ptek + i) = 0;
-		invlpg(up + i * PAGE_SIZE);
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
 	}
@@ -940,13 +921,13 @@ pmap_swapout_proc(p)
 
 	upobj = p->p_upages_obj;
 	up = (vm_offset_t)p->p_uarea;
+	pmap_qremove(up, UAREA_PAGES);
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_lookup(upobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_proc: upage already missing?");
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
-		pmap_kremove(up + i * PAGE_SIZE);
 	}
 }
 
@@ -958,6 +939,7 @@ pmap_swapin_proc(p)
 	struct proc *p;
 {
 	int i, rv;
+	vm_page_t ma[UAREA_PAGES];
 	vm_object_t upobj;
 	vm_offset_t up;
 	vm_page_t m;
@@ -966,7 +948,6 @@ pmap_swapin_proc(p)
 	up = (vm_offset_t)p->p_uarea;
 	for (i = 0; i < UAREA_PAGES; i++) {
 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
 			if (rv != VM_PAGER_OK)
@@ -974,10 +955,12 @@ pmap_swapin_proc(p)
 			m = vm_page_lookup(upobj, i);
 			m->valid = VM_PAGE_BITS_ALL;
 		}
+		ma[i] = m;
 		vm_page_wire(m);
 		vm_page_wakeup(m);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 	}
+	pmap_qenter(up, ma, UAREA_PAGES);
 }
 
 /*
@@ -989,10 +972,10 @@ void
 pmap_new_thread(struct thread *td)
 {
 	int i;
+	vm_page_t ma[KSTACK_PAGES];
 	vm_object_t ksobj;
 	vm_page_t m;
 	vm_offset_t ks;
-	pt_entry_t *ptek, oldpte;
 
 	/*
 	 * allocate object for the kstack
@@ -1003,40 +986,33 @@ pmap_new_thread(struct thread *td)
 		td->td_kstack_obj = ksobj;
 	}
 
-#ifdef KSTACK_GUARD
 	/* get a kernel virtual address for the kstack for this thread */
 	ks = td->td_kstack;
+#ifdef KSTACK_GUARD
 	if (ks == 0) {
 		ks = kmem_alloc_nofault(kernel_map,
 		    (KSTACK_PAGES + 1) * PAGE_SIZE);
 		if (ks == 0)
 			panic("pmap_new_thread: kstack allocation failed");
+		if (*vtopte(ks) != 0)
+			pmap_qremove(ks, 1);
 		ks += PAGE_SIZE;
 		td->td_kstack = ks;
 	}
-
-	ptek = vtopte(ks - PAGE_SIZE);
-	oldpte = *ptek;
-	*ptek = 0;
-	if (oldpte)
-		invlpg(ks - PAGE_SIZE);
-	ptek++;
 #else
-	/* get a kernel virtual address for the kstack for this thread */
-	ks = td->td_kstack;
 	if (ks == 0) {
 		ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
 		if (ks == 0)
 			panic("pmap_new_thread: kstack allocation failed");
 		td->td_kstack = ks;
 	}
-	ptek = vtopte(ks);
 #endif
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		/*
 		 * Get a kernel stack page
 		 */
 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+		ma[i] = m;
 
 		/*
 		 * Wire the page
@@ -1044,19 +1020,12 @@ pmap_new_thread(struct thread *td)
 		m->wire_count++;
 		cnt.v_wire_count++;
 
-		oldpte = *(ptek + i);
-		/*
-		 * Enter the page into the kernel address space.
-		 */
-		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
-		if (oldpte)
-			invlpg(ks + i * PAGE_SIZE);
-
 		vm_page_wakeup(m);
 		vm_page_flag_clear(m, PG_ZERO);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 		m->valid = VM_PAGE_BITS_ALL;
 	}
+	pmap_qenter(ks, ma, KSTACK_PAGES);
 }
 
 /*
@@ -1071,18 +1040,15 @@ pmap_dispose_thread(td)
 	vm_object_t ksobj;
 	vm_offset_t ks;
 	vm_page_t m;
-	pt_entry_t *ptek;
 
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
-	ptek = vtopte(ks);
+	pmap_qremove(ks, KSTACK_PAGES);
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_dispose_thread: kstack already missing?");
 		vm_page_busy(m);
-		*(ptek + i) = 0;
-		invlpg(ks + i * PAGE_SIZE);
 		vm_page_unwire(m, 0);
 		vm_page_free(m);
 	}
@@ -1102,13 +1068,13 @@ pmap_swapout_thread(td)
 
 	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
+	pmap_qremove(ks, KSTACK_PAGES);
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_lookup(ksobj, i);
 		if (m == NULL)
 			panic("pmap_swapout_thread: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_unwire(m, 0);
-		pmap_kremove(ks + i * PAGE_SIZE);
 	}
 }
 
@@ -1120,6 +1086,7 @@ pmap_swapin_thread(td)
 	struct thread *td;
 {
 	int i, rv;
+	vm_page_t ma[KSTACK_PAGES];
 	vm_object_t ksobj;
 	vm_offset_t ks;
 	vm_page_t m;
@@ -1128,7 +1095,6 @@ pmap_swapin_thread(td)
 	ks = td->td_kstack;
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
-		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
 		if (m->valid != VM_PAGE_BITS_ALL) {
 			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
 			if (rv != VM_PAGER_OK)
@@ -1136,10 +1102,12 @@ pmap_swapin_thread(td)
 			m = vm_page_lookup(ksobj, i);
 			m->valid = VM_PAGE_BITS_ALL;
 		}
+		ma[i] = m;
 		vm_page_wire(m);
 		vm_page_wakeup(m);
 		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
 	}
+	pmap_qenter(ks, ma, KSTACK_PAGES);
 }
 
 /***************************************************
@@ -1234,7 +1202,8 @@ pmap_pinit0(pmap)
 {
 	pmap->pm_pdir =
		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
-	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
+	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t)IdlePTD);
+	invlpg((vm_offset_t)pmap->pm_pdir);
 	pmap->pm_count = 1;
 	pmap->pm_ptphint = NULL;
 	pmap->pm_active = 0;
@@ -1280,7 +1249,7 @@ pmap_pinit(pmap)
 	vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
 	ptdpg->valid = VM_PAGE_BITS_ALL;
 
-	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
+	pmap_qenter((vm_offset_t) pmap->pm_pdir, &ptdpg, 1);
 	if ((ptdpg->flags & PG_ZERO) == 0)
 		bzero(pmap->pm_pdir, PAGE_SIZE);
 
@@ -2369,6 +2338,7 @@ void *
 pmap_kenter_temporary(vm_offset_t pa, int i)
 {
 	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
+	invlpg((vm_offset_t)crashdumpmap + (i * PAGE_SIZE));
 	return ((void *)crashdumpmap);
 }
 
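One detail in the KSTACK_GUARD hunk is easy to miss: the guard page is the first page of the (KSTACK_PAGES + 1)-page allocation, and instead of hand-zapping its PTE with an inline invlpg the code now checks *vtopte(ks) and routes any stale mapping through pmap_qremove(). A toy model of that guard handling; fake_pte[] and the *_model() helpers are hypothetical replacements for the real page table and for vtopte()/pmap_qremove():

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE    4096
#define KSTACK_PAGES 2
#define NPTES        64

static uintptr_t fake_pte[NPTES];	/* stand-in for the page table */

static uintptr_t *
vtopte_model(uintptr_t va)
{
	return (&fake_pte[(va / PAGE_SIZE) % NPTES]);
}

/* Models pmap_qremove(): clear PTEs, then one ranged invalidation. */
static void
qremove_model(uintptr_t va, int n)
{
	int i;

	for (i = 0; i < n; i++)
		*vtopte_model(va + (uintptr_t)i * PAGE_SIZE) = 0;
}

/* Models the KSTACK_GUARD path: page 0 of the run stays unmapped. */
static uintptr_t
new_kstack(uintptr_t ks)
{
	if (*vtopte_model(ks) != 0)	/* stale PTE from a prior user */
		qremove_model(ks, 1);
	return (ks + PAGE_SIZE);	/* usable stack starts past guard */
}

int
main(void)
{
	uintptr_t ks = 8 * PAGE_SIZE;

	*vtopte_model(ks) = 0x2000 | 1;		/* plant a stale mapping */
	assert(new_kstack(ks) == ks + PAGE_SIZE);
	assert(*vtopte_model(ks) == 0);		/* guard unmapped again */
	return (0);
}
```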
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 3a1b56cbb1e6..3bad53c18464 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -551,7 +551,7 @@ exec_map_first_page(imgp)
 	vm_page_wire(ma[0]);
 	vm_page_wakeup(ma[0]);
 
-	pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
+	pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
 	imgp->firstpage = ma[0];
 
 	return 0;
@@ -564,7 +564,7 @@ exec_unmap_first_page(imgp)
 	GIANT_REQUIRED;
 
 	if (imgp->firstpage) {
-		pmap_kremove((vm_offset_t) imgp->image_header);
+		pmap_qremove((vm_offset_t)imgp->image_header, 1);
 		vm_page_unwire(imgp->firstpage, 1);
 		imgp->firstpage = NULL;
 	}
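exec_map_first_page() shows the other half of the cleanup: callers stop computing VM_PAGE_TO_PHYS() themselves and instead hand the vm_page_t to pmap_qenter() as a one-element array, which is what keeps physical addresses out of the machine-independent code and eases PAE later. A minimal sketch of the idiom, with page_t, qenter() and qremove() as assumed stand-ins rather than the kernel interfaces:

```c
#include <stdio.h>

typedef struct page {
	unsigned long phys;
} page_t;

/* Stand-ins for pmap_qenter()/pmap_qremove(); count may be 1. */
static void
qenter(unsigned long kva, page_t **ma, int count)
{
	printf("map %d page(s) at %#lx, phys %#lx\n",
	    count, kva, ma[0]->phys);
}

static void
qremove(unsigned long kva, int count)
{
	printf("unmap %d page(s) at %#lx\n", count, kva);
}

int
main(void)
{
	page_t pg = { 0x123000 };
	page_t *m = &pg;

	/*
	 * The converted call sites pass the page itself as a
	 * one-element array; no VM_PAGE_TO_PHYS() in the caller.
	 */
	qenter(0x1000000, &m, 1);
	qremove(0x1000000, 1);
	return (0);
}
```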
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index e15f1914d450..02b75b9982af 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -278,14 +278,14 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		vm_object_reference(object);
 		vm_map_lookup_done(tmap, out_entry);
 
-		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+		pmap_qenter(kva, &m, 1);
 
 		/*
 		 * Now do the i/o move.
 		 */
 		error = uiomove((caddr_t)(kva + page_offset), len, uio);
 
-		pmap_kremove(kva);
+		pmap_qremove(kva, 1);
 
 		/*
 		 * release the page and the object
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 32e91ca87d2d..d3a849c39bf8 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3244,7 +3244,7 @@ tryagain:
 		vm_page_wire(p);
 		p->valid = VM_PAGE_BITS_ALL;
 		vm_page_flag_clear(p, PG_ZERO);
-		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
+		pmap_qenter(pg, &p, 1);
 		bp->b_pages[index] = p;
 		vm_page_wakeup(p);
 	}
@@ -3272,7 +3272,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 				bp->b_blkno, bp->b_lblkno);
 		}
 		bp->b_pages[index] = NULL;
-		pmap_kremove(pg);
+		pmap_qremove(pg, 1);
 		vm_page_busy(p);
 		vm_page_unwire(p, 0);
 		vm_page_free(p);
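proc_rwmem() is the classic transient-window pattern: map the target page at a kernel va, move the bytes, unmap. The conversion only changes which primitives carry the window. A userland model of the window itself, with memcpy() standing in for uiomove() and the map/unmap helpers for pmap_qenter()/pmap_qremove():

```c
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char user_page[PAGE_SIZE];	/* the target process's page */
static char *window;			/* the kernel va of the window */

static void
map_window(char *page)			/* models pmap_qenter(kva, &m, 1) */
{
	window = page;
}

static void
unmap_window(void)			/* models pmap_qremove(kva, 1) */
{
	window = NULL;
}

int
main(void)
{
	char buf[8];

	map_window(user_page);
	memcpy(window + 100, "ptrace!", 8);	/* models uiomove() write */
	memcpy(buf, window + 100, 8);		/* ... and the read back */
	unmap_window();
	printf("%s\n", buf);
	return (0);
}
```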
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6cb678a9d471..3a529b6f9eed 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -318,7 +318,7 @@ vm_pager_map_page(m)
 	vm_offset_t kva;
 
 	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
-	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+	pmap_qenter(kva, &m, 1);
 	return (kva);
 }
 
@@ -326,7 +326,7 @@ void
 vm_pager_unmap_page(kva)
 	vm_offset_t kva;
 {
-	pmap_kremove(kva);
+	pmap_qremove(kva, 1);
 	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 }
 
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index a1b1d3c3b6ee..5057b6e82bc3 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -386,7 +386,7 @@ _zget(vm_zone_t z)
 				break;
 
 			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
-			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
+			pmap_qenter(zkva, &m, 1);
 			bzero((caddr_t) zkva, PAGE_SIZE);
 			z->zpagecount++;
 			atomic_add_int(&zone_kmem_pages, 1);
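The vm_zone hunk is the same single-page conversion applied to zone growth: each newly allocated page is mapped at the next slot of the zone's preallocated kva run and zeroed. A toy model of that bookkeeping, using malloc() in place of the page allocator and a plain pointer in place of the kva reservation; only the shape of the accounting matches the kernel code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE  4096
#define ZONE_PAGES 4

struct zone {
	char *zkva;		/* base of the reserved kva run */
	int zpagecount;		/* pages mapped into the run so far */
};

static int
zone_grow(struct zone *z)
{
	char *zkva;

	if (z->zpagecount >= ZONE_PAGES)
		return (0);
	zkva = z->zkva + z->zpagecount * PAGE_SIZE;	/* next free slot */
	/* kernel: pmap_qenter(zkva, &m, 1) maps the fresh page here */
	memset(zkva, 0, PAGE_SIZE);			/* models bzero() */
	z->zpagecount++;
	return (1);
}

int
main(void)
{
	struct zone z;

	z.zkva = malloc(ZONE_PAGES * PAGE_SIZE);
	z.zpagecount = 0;
	while (zone_grow(&z))
		;
	printf("%d pages mapped\n", z.zpagecount);
	free(z.zkva);
	return (0);
}
```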