device_pager: use iterators to free device pages

Change cdev_mgtdev_pager_free_page to take an iterator, rather than an
object and page, so that removing the page from the object radix tree
can take advantage of locality with iterators. Define a
general-purpose function to free all pages, which can be used in
several places.

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D47692
This commit is contained in:
Doug Moore 2024-11-21 15:49:30 -06:00
parent ae4f39464c
commit 38e3125d6d
7 changed files with 43 additions and 71 deletions

View File

@ -47,6 +47,7 @@
static void
tegra_bo_destruct(struct tegra_bo *bo)
{
struct pctrie_iter pages;
vm_page_t m;
size_t size;
int i;
@ -58,11 +59,12 @@ tegra_bo_destruct(struct tegra_bo *bo)
if (bo->vbase != 0)
pmap_qremove(bo->vbase, bo->npages);
vm_page_iter_init(&pages, bo->cdev_pager);
VM_OBJECT_WLOCK(bo->cdev_pager);
for (i = 0; i < bo->npages; i++) {
m = bo->m[i];
m = vm_page_iter_lookup(&pages, i);
vm_page_busy_acquire(m, 0);
cdev_mgtdev_pager_free_page(bo->cdev_pager, m);
cdev_mgtdev_pager_free_page(&pages);
m->flags &= ~PG_FICTITIOUS;
vm_page_unwire_noq(m);
vm_page_free(m);

View File

@ -418,27 +418,13 @@ lkpi_io_mapping_map_user(struct io_mapping *iomap,
*/
void
lkpi_unmap_mapping_range(void *obj, loff_t const holebegin __unused,
loff_t const holelen, int even_cows __unused)
loff_t const holelen __unused, int even_cows __unused)
{
vm_object_t devobj;
vm_page_t page;
int i, page_count;
devobj = cdev_pager_lookup(obj);
if (devobj != NULL) {
page_count = OFF_TO_IDX(holelen);
VM_OBJECT_WLOCK(devobj);
retry:
for (i = 0; i < page_count; i++) {
page = vm_page_lookup(devobj, i);
if (page == NULL)
continue;
if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
goto retry;
cdev_mgtdev_pager_free_page(devobj, page);
}
VM_OBJECT_WUNLOCK(devobj);
cdev_mgtdev_pager_free_pages(devobj);
vm_object_deallocate(devobj);
}
}

View File

@ -361,26 +361,12 @@ void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
vm_object_t vm_obj;
vm_page_t m;
int i;
vm_obj = cdev_pager_lookup(bo);
if (vm_obj == NULL)
return;
VM_OBJECT_WLOCK(vm_obj);
retry:
for (i = 0; i < bo->num_pages; i++) {
m = vm_page_lookup(vm_obj, i);
if (m == NULL)
continue;
if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
goto retry;
cdev_mgtdev_pager_free_page(vm_obj, m);
if (vm_obj != NULL) {
cdev_mgtdev_pager_free_pages(vm_obj);
vm_object_deallocate(vm_obj);
}
VM_OBJECT_WUNLOCK(vm_obj);
vm_object_deallocate(vm_obj);
}
#if 0

View File

@ -563,7 +563,6 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
{
uint32_t i;
int error, count;
vm_page_t m;
struct gnttab_unmap_grant_ref *unmap_ops;
unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
@ -592,17 +591,7 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
}
/* Free the pages. */
VM_OBJECT_WLOCK(gmap->map->mem);
retry:
for (i = 0; i < gmap->count; i++) {
m = vm_page_lookup(gmap->map->mem, i);
if (m == NULL)
continue;
if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
goto retry;
cdev_mgtdev_pager_free_page(gmap->map->mem, m);
}
VM_OBJECT_WUNLOCK(gmap->map->mem);
cdev_mgtdev_pager_free_pages(gmap->map->mem);
/* Perform unmap hypercall. */
error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,

View File

@ -120,25 +120,13 @@ privcmd_pg_dtor(void *handle)
struct privcmd_map *map = handle;
int error __diagused;
vm_size_t i;
vm_page_t m;
/*
* Remove the mappings from the used pages. This will remove the
* underlying p2m bindings in Xen second stage translation.
*/
if (map->mapped == true) {
VM_OBJECT_WLOCK(map->mem);
retry:
for (i = 0; i < map->size; i++) {
m = vm_page_lookup(map->mem, i);
if (m == NULL)
continue;
if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
goto retry;
cdev_mgtdev_pager_free_page(map->mem, m);
}
VM_OBJECT_WUNLOCK(map->mem);
cdev_mgtdev_pager_free_pages(map->mem);
for (i = 0; i < map->size; i++) {
rm.gpfn = atop(map->phys_base_addr) + i;
HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &rm);

View File

@ -52,6 +52,7 @@
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/uma.h>
static void dev_pager_init(void);
@ -262,9 +263,13 @@ void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
if (object->type == OBJT_MGTDEVICE)
cdev_mgtdev_pager_free_page(object, m);
else if (object->type == OBJT_DEVICE)
if (object->type == OBJT_MGTDEVICE) {
struct pctrie_iter pages;
vm_page_iter_init(&pages, object);
vm_page_iter_lookup(&pages, m->pindex);
cdev_mgtdev_pager_free_page(&pages);
} else if (object->type == OBJT_DEVICE)
dev_pager_free_page(object, m);
else
KASSERT(false,
@ -272,15 +277,30 @@ cdev_pager_free_page(vm_object_t object, vm_page_t m)
}
void
cdev_mgtdev_pager_free_page(vm_object_t object, vm_page_t m)
cdev_mgtdev_pager_free_page(struct pctrie_iter *pages)
{
pmap_remove_all(vm_radix_iter_page(pages));
vm_page_iter_remove(pages);
}
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->type == OBJT_MGTDEVICE &&
(m->oflags & VPO_UNMANAGED) == 0),
("Unmanaged device or page obj %p m %p", object, m));
pmap_remove_all(m);
(void)vm_page_remove(m);
void
cdev_mgtdev_pager_free_pages(vm_object_t object)
{
struct pctrie_iter pages;
vm_page_t m;
vm_page_iter_init(&pages, object);
VM_OBJECT_WLOCK(object);
retry:
for (m = vm_page_iter_lookup_ge(&pages, 0); m != NULL;
m = vm_radix_iter_step(&pages)) {
if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL)) {
pctrie_iter_reset(&pages);
goto retry;
}
cdev_mgtdev_pager_free_page(&pages);
}
VM_OBJECT_WUNLOCK(object);
}
static void

View File

@ -300,7 +300,8 @@ vm_object_t cdev_pager_allocate(void *handle, enum obj_type tp,
vm_ooffset_t foff, struct ucred *cred);
vm_object_t cdev_pager_lookup(void *handle);
void cdev_pager_free_page(vm_object_t object, vm_page_t m);
void cdev_mgtdev_pager_free_page(vm_object_t object, vm_page_t m);
void cdev_mgtdev_pager_free_page(struct pctrie_iter *pages);
void cdev_mgtdev_pager_free_pages(vm_object_t object);
struct phys_pager_ops {
int (*phys_pg_getpages)(vm_object_t vm_obj, vm_page_t *m, int count,