Convert all pmap_kenter/pmap_kremove pairs in MI code to use
pmap_qenter/pmap_qremove.

pmap_kenter is not safe to use in MI code because it is not guaranteed
to flush the mapping from the TLB on all CPUs.  If the process in
question is preempted and migrates CPUs between the calls to pmap_kenter
and pmap_kremove, the original CPU will be left with stale mappings in
its TLB.  This is currently not a problem for i386 because we do not use
PG_G on SMP, so all mappings, not just user mappings, are flushed from
the TLB on context switches.  That is not the case on all architectures,
and if PG_G is ever used with SMP on i386 it will become a problem.
This was committed earlier by peter as part of his fine-grained TLB
shootdown work for i386, which was backed out for other reasons.

Reviewed by:	peter
Author:	Jake Burkholder
Date:	2002-03-17 00:56:41 +00:00
Commit:	ac59490b5e
Parent:	3646fbdacb
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=92461
5 changed files with 10 additions and 9 deletions
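
The conversion is the same in every file below: each single-page
pmap_kenter/pmap_kremove pair becomes a count-of-one pmap_qenter/
pmap_qremove pair.  As a minimal sketch of the pattern (the declarations
below are approximately as they stood in the tree at this time; kva and
m are illustrative names, not code from this commit):

	/*
	 * MD interface: enters one wired kernel mapping, but is only
	 * required to invalidate the TLB entry on the current CPU.
	 */
	void	pmap_kenter(vm_offset_t va, vm_offset_t pa);
	void	pmap_kremove(vm_offset_t va);

	/*
	 * MI-safe interface: maps `count' pages from a vm_page_t array
	 * and must invalidate the range in the TLB of every CPU.
	 */
	void	pmap_qenter(vm_offset_t va, vm_page_t *m, int count);
	void	pmap_qremove(vm_offset_t va, int count);

	/* Before: stale TLB entries if we migrate CPUs in between. */
	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
	/* ... use the mapping ... */
	pmap_kremove(kva);

	/* After: note the &m, since pmap_qenter takes a page array. */
	pmap_qenter(kva, &m, 1);
	/* ... use the mapping ... */
	pmap_qremove(kva, 1);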

sys/kern/kern_exec.c

@@ -551,7 +551,7 @@ exec_map_first_page(imgp)
 	vm_page_wire(ma[0]);
 	vm_page_wakeup(ma[0]);
-	pmap_kenter((vm_offset_t) imgp->image_header, VM_PAGE_TO_PHYS(ma[0]));
+	pmap_qenter((vm_offset_t)imgp->image_header, ma, 1);
 	imgp->firstpage = ma[0];
 	return 0;
@@ -564,7 +564,7 @@ exec_unmap_first_page(imgp)
 	GIANT_REQUIRED;
 	if (imgp->firstpage) {
-		pmap_kremove((vm_offset_t) imgp->image_header);
+		pmap_qremove((vm_offset_t)imgp->image_header, 1);
 		vm_page_unwire(imgp->firstpage, 1);
 		imgp->firstpage = NULL;
 	}

sys/kern/sys_process.c

@@ -278,14 +278,14 @@ proc_rwmem(struct proc *p, struct uio *uio)
 		vm_object_reference(object);
 		vm_map_lookup_done(tmap, out_entry);
-		pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+		pmap_qenter(kva, &m, 1);
 		/*
 		 * Now do the i/o move.
 		 */
 		error = uiomove((caddr_t)(kva + page_offset), len, uio);
-		pmap_kremove(kva);
+		pmap_qremove(kva, 1);
 		/*
 		 * release the page and the object

sys/kern/vfs_bio.c

@@ -3324,7 +3324,7 @@ tryagain:
 		vm_page_wire(p);
 		p->valid = VM_PAGE_BITS_ALL;
 		vm_page_flag_clear(p, PG_ZERO);
-		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
+		pmap_qenter(pg, &p, 1);
 		bp->b_pages[index] = p;
 		vm_page_wakeup(p);
 	}
@@ -3353,7 +3353,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
 			    bp->b_blkno, bp->b_lblkno);
 		}
 		bp->b_pages[index] = NULL;
-		pmap_kremove(pg);
+		pmap_qremove(pg, 1);
 		vm_page_busy(p);
 		vm_page_unwire(p, 0);
 		vm_page_free(p);

sys/vm/vm_pager.c

@@ -318,7 +318,7 @@ vm_pager_map_page(m)
 	vm_offset_t kva;
 	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
-	pmap_kenter(kva, VM_PAGE_TO_PHYS(m));
+	pmap_qenter(kva, &m, 1);
 	return (kva);
 }
@@ -326,7 +326,8 @@ void
 vm_pager_unmap_page(kva)
 	vm_offset_t kva;
 {
-	pmap_kremove(kva);
+	pmap_qremove(kva, 1);
 	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
 }

sys/vm/vm_zone.c

@@ -386,7 +386,7 @@ _zget(vm_zone_t z)
 			break;
 		zkva = z->zkva + z->zpagecount * PAGE_SIZE;
-		pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
+		pmap_qenter(zkva, &m, 1);
 		bzero((caddr_t) zkva, PAGE_SIZE);
 		z->zpagecount++;
 		atomic_add_int(&zone_kmem_pages, 1);