From 7fe5c13c05e84ab7b4056daa316bf89719f1151e Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Fri, 5 Jul 2019 05:23:23 +0000
Subject: [PATCH] Merge r349526 from amd64.

When we protect an L3 entry, we only call vm_page_dirty() when, in fact,
we are write protecting the page and the L3 entry has PTE_D set.
However, pmap_protect() was always calling vm_page_dirty() when an L2
entry has PTE_D set. Handle L2 entries the same as L3 entries so that
we won't perform unnecessary calls to vm_page_dirty().

Simplify the loop calling vm_page_dirty() on L2 entries.
---
 sys/riscv/riscv/pmap.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 3d9fcdd1f8e9..0385747e8fae 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2298,9 +2298,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
 	pd_entry_t *l1, *l2, l2e;
 	pt_entry_t *l3, l3e, mask;
-	vm_page_t m;
+	vm_page_t m, mt;
 	vm_paddr_t pa;
-	vm_offset_t va, va_next;
+	vm_offset_t va_next;
 	bool anychanged, pv_lists_locked;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
@@ -2340,12 +2340,13 @@ resume:
 		if ((l2e & PTE_RWX) != 0) {
 			if (sva + L2_SIZE == va_next && eva >= va_next) {
 retryl2:
-				if ((l2e & (PTE_SW_MANAGED | PTE_D)) ==
+				if ((prot & VM_PROT_WRITE) == 0 &&
+				    (l2e & (PTE_SW_MANAGED | PTE_D)) ==
 				    (PTE_SW_MANAGED | PTE_D)) {
 					pa = PTE_TO_PHYS(l2e);
-					for (va = sva, m = PHYS_TO_VM_PAGE(pa);
-					    va < va_next; m++, va += PAGE_SIZE)
-						vm_page_dirty(m);
+					m = PHYS_TO_VM_PAGE(pa);
+					for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
+						vm_page_dirty(mt);
 				}
 				if (!atomic_fcmpset_long(l2, &l2e, l2e & ~mask))
 					goto retryl2;
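
The rule this change applies to L2 (superpage) entries can be illustrated
outside the kernel. The following is a minimal, self-contained C sketch, not
the kernel code: the constant values, the struct vm_page layout, and the
dirty_page() helper are simplified stand-ins, and Ln_ENTRIES reflects the 512
4 KB pages backing one 2 MB L2 mapping. It shows why vm_page_dirty() is only
needed when write permission is being removed from a managed mapping whose
PTE_D bit is set, and how the simplified loop walks the contiguous vm_page
array directly instead of advancing a virtual address in lockstep.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PTE_D		(1u << 7)	/* dirty bit (stand-in value) */
#define	PTE_SW_MANAGED	(1u << 9)	/* software-managed (stand-in value) */
#define	VM_PROT_WRITE	0x02		/* stand-in protection flag */
#define	Ln_ENTRIES	512		/* 4 KB pages per 2 MB L2 mapping */

struct vm_page {
	bool dirty;
};

/* Stand-in for the kernel's vm_page_dirty(). */
static void
dirty_page(struct vm_page *m)
{
	m->dirty = true;
}

/*
 * Propagate a superpage's hardware dirty bit to its vm_page structures,
 * but only when write permission is being removed.  If the mapping stays
 * writable, PTE_D remains set in the page table entry, so no information
 * is lost by deferring the vm_page_dirty() calls.
 */
static void
protect_l2(uint64_t l2e, int prot, struct vm_page *m)
{
	struct vm_page *mt;

	if ((prot & VM_PROT_WRITE) == 0 &&
	    (l2e & (PTE_SW_MANAGED | PTE_D)) == (PTE_SW_MANAGED | PTE_D)) {
		/*
		 * The vm_page structures for a superpage's constituent
		 * 4 KB pages are contiguous, so walk the array directly.
		 */
		for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
			dirty_page(mt);
	}
}

int
main(void)
{
	static struct vm_page pages[Ln_ENTRIES];

	/* Write-protecting a managed, dirty superpage dirties its pages. */
	protect_l2(PTE_SW_MANAGED | PTE_D, 0, pages);
	printf("page[0] dirty: %d\n", pages[0].dirty);	/* prints 1 */

	return (0);
}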