diff --git a/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c b/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c
index bbb594ac2b2c..33f1dd15fb7e 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c
@@ -454,7 +454,7 @@ sendmore:
 	while (uiotmp.uio_resid > 0) {
 		rv = cxgb_vm_page_to_miov(toep, &uiotmp, &m);
 		if (rv) {
-			vm_fault_unhold_pages(toep->tp_pages, count);
+			vm_page_unhold_pages(toep->tp_pages, count);
 			return (rv);
 		}
 		uio->uio_resid -= m->m_pkthdr.len;
@@ -469,7 +469,7 @@ sendmore:
 	 *
 	 */
 	cxgb_wait_dma_completion(toep);
-	vm_fault_unhold_pages(toep->tp_pages, count);
+	vm_page_unhold_pages(toep->tp_pages, count);
 	/*
 	 * If there is more data to send adjust local copy of iov
 	 * to point to teh start
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_ddp.c b/sys/dev/cxgb/ulp/tom/cxgb_ddp.c
index a54598c60672..a188f620afba 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_ddp.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_ddp.c
@@ -175,7 +175,7 @@ different_gl:
 	*newgl = p;
 	return (0);
 unpin:
-	vm_fault_unhold_pages(p->dgl_pages, npages);
+	vm_page_unhold_pages(p->dgl_pages, npages);
 
 free_gl:
 
@@ -208,7 +208,7 @@ ddp_gl_free_pages(struct ddp_gather_list *gl, int dirty)
 	/*
 	 * XXX mark pages as dirty before unholding
 	 */
-	vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
+	vm_page_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
 }
 
 void
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_vm.c b/sys/dev/cxgb/ulp/tom/cxgb_vm.c
index 4659788f0bb5..d7328b6a8eae 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_vm.c
+++ b/sys/dev/cxgb/ulp/tom/cxgb_vm.c
@@ -150,16 +150,3 @@ error:
 	}
 	return (EFAULT);
 }
-
-void
-vm_fault_unhold_pages(vm_page_t *mp, int count)
-{
-
-	KASSERT(count >= 0, ("negative count %d", count));
-	while (count--) {
-		vm_page_lock(*mp);
-		vm_page_unhold(*mp);
-		vm_page_unlock(*mp);
-		mp++;
-	}
-}
diff --git a/sys/dev/cxgb/ulp/tom/cxgb_vm.h b/sys/dev/cxgb/ulp/tom/cxgb_vm.h
index 7532e20ef78f..6647d553391c 100644
--- a/sys/dev/cxgb/ulp/tom/cxgb_vm.h
+++ b/sys/dev/cxgb/ulp/tom/cxgb_vm.h
@@ -34,6 +34,5 @@ $FreeBSD$
 
 int vm_fault_hold_user_pages(vm_map_t map, vm_offset_t addr,
     vm_page_t *mp, int count, vm_prot_t prot);
-void vm_fault_unhold_pages(vm_page_t *mp, int count);
 
 #endif
diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c
index 22401a7a8a60..444b4240ba79 100644
--- a/sys/kern/sys_pipe.c
+++ b/sys/kern/sys_pipe.c
@@ -749,7 +749,7 @@ pipe_build_write_buffer(wpipe, uio)
 {
 	pmap_t pmap;
 	u_int size;
-	int i, j;
+	int i;
 	vm_offset_t addr, endaddr;
 
 	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
@@ -771,11 +771,7 @@ pipe_build_write_buffer(wpipe, uio)
 		 */
 	race:
 		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
-			for (j = 0; j < i; j++) {
-				vm_page_lock(wpipe->pipe_map.ms[j]);
-				vm_page_unhold(wpipe->pipe_map.ms[j]);
-				vm_page_unlock(wpipe->pipe_map.ms[j]);
-			}
+			vm_page_unhold_pages(wpipe->pipe_map.ms, i);
 			return (EFAULT);
 		}
 		wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
@@ -812,14 +808,9 @@ static void
 pipe_destroy_write_buffer(wpipe)
 	struct pipe *wpipe;
 {
-	int i;
 
 	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
-	for (i = 0; i < wpipe->pipe_map.npages; i++) {
-		vm_page_lock(wpipe->pipe_map.ms[i]);
-		vm_page_unhold(wpipe->pipe_map.ms[i]);
-		vm_page_unlock(wpipe->pipe_map.ms[i]);
-	}
+	vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
 	wpipe->pipe_map.npages = 0;
 }
 
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index ccde4b4ab4bc..7d04e3344ef6 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3911,16 +3911,11 @@ retry:
 void
 vunmapbuf(struct buf *bp)
 {
-	int pidx;
 	int npages;
 
 	npages = bp->b_npages;
 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
-	for (pidx = 0; pidx < npages; pidx++) {
-		vm_page_lock(bp->b_pages[pidx]);
-		vm_page_unhold(bp->b_pages[pidx]);
-		vm_page_unlock(bp->b_pages[pidx]);
-	}
+	vm_page_unhold_pages(bp->b_pages, npages);
 
 	bp->b_data = bp->b_saveaddr;
 }
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 1208ea07fb5c..e708a5706fcd 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -600,6 +600,35 @@ vm_page_unhold(vm_page_t mem)
 		vm_page_free_toq(mem);
 }
 
+/*
+ *	vm_page_unhold_pages:
+ *
+ *	Unhold each of the pages that is referenced by the given array.
+ */
+void
+vm_page_unhold_pages(vm_page_t *ma, int count)
+{
+	struct mtx *mtx, *new_mtx;
+
+	mtx = NULL;
+	for (; count != 0; count--) {
+		/*
+		 * Avoid releasing and reacquiring the same page lock.
+		 */
+		new_mtx = vm_page_lockptr(*ma);
+		if (mtx != new_mtx) {
+			if (mtx != NULL)
+				mtx_unlock(mtx);
+			mtx = new_mtx;
+			mtx_lock(mtx);
+		}
+		vm_page_unhold(*ma);
+		ma++;
+	}
+	if (mtx != NULL)
+		mtx_unlock(mtx);
+}
+
 /*
  *	vm_page_free:
  *
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e4183b38499e..54a15fb78c7a 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -364,6 +364,7 @@ void vm_page_set_valid(vm_page_t m, int base, int size);
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
+void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unwire (vm_page_t, int);
 void vm_page_wire (vm_page_t);
 void vm_page_set_validclean (vm_page_t, int, int);
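Every consumer above replaces the same open-coded hold-release loop with the new
vm_page_unhold_pages() helper added to vm_page.c, which also skips dropping and
reacquiring a page lock when consecutive pages in the array hash to the same lock.
As a minimal sketch of the caller-side change (the unhold_buffer_pages() wrapper
and its pages/npages parameters are hypothetical and not part of this commit), the
before/after pattern looks like:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical caller: drop the hold acquired on each page backing a
 * buffer.  Before this change every consumer open-coded the loop in the
 * "#if 0" branch; it is now a single vm_page_unhold_pages() call.
 */
static void
unhold_buffer_pages(vm_page_t *pages, int npages)
{
#if 0
	/* Old pattern: lock, unhold, and unlock each page individually. */
	int i;

	for (i = 0; i < npages; i++) {
		vm_page_lock(pages[i]);
		vm_page_unhold(pages[i]);
		vm_page_unlock(pages[i]);
	}
#else
	/* New pattern: one call; the helper batches page-lock handling. */
	vm_page_unhold_pages(pages, npages);
#endif
}

The helper only consolidates the locking; the per-page semantics of
vm_page_unhold() are unchanged.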