From 3f7905d29c1790f2749b531e722c6e77d705ca87 Mon Sep 17 00:00:00 2001
From: Konstantin Belousov
Date: Sun, 23 Mar 2008 07:07:27 +0000
Subject: [PATCH] Prevent the overflow in the calculation of the next page directory.

The overflow causes a wraparound, with consequent corruption of (almost)
the whole address space mapping.

As Alan noted, pmap_copy() does not require the wrap-around checks
because it cannot be applied to the kernel's pmap. The checks there are
included for consistency.

Reported and tested by:	kris (i386/pmap.c:pmap_remove() part)
Reviewed by:	alc
MFC after:	1 week
---
 sys/amd64/amd64/pmap.c | 18 ++++++++++++++++++
 sys/i386/i386/pmap.c   |  6 ++++++
 2 files changed, 24 insertions(+)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d2ada05bda20..77468501f0b1 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2444,12 +2444,16 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
@@ -2457,6 +2461,8 @@
 		 * Calculate index for next page table.
 		 */
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -2672,16 +2678,22 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		pml4e = pmap_pml4e(pmap, sva);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
 			continue;
 		}
 
 		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
 
 		pde = pmap_pdpe_to_pde(pdpe, sva);
 		ptpaddr = *pde;
@@ -3485,16 +3497,22 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		pml4e = pmap_pml4e(src_pmap, addr);
 		if ((*pml4e & PG_V) == 0) {
 			va_next = (addr + NBPML4) & ~PML4MASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 
 		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 		if ((*pdpe & PG_V) == 0) {
 			va_next = (addr + NBPDP) & ~PDPMASK;
+			if (va_next < addr)
+				va_next = end_addr;
 			continue;
 		}
 
 		va_next = (addr + NBPDR) & ~PDRMASK;
+		if (va_next < addr)
+			va_next = end_addr;
 
 		pde = pmap_pdpe_to_pde(pdpe, addr);
 		srcptepaddr = *pde;
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index eee6de05dbd9..ddcf449fadc7 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2046,6 +2046,8 @@
 		 * Calculate index for next page table.
 		 */
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 
 		if (pmap->pm_stats.resident_count == 0)
 			break;
@@ -2194,6 +2196,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		unsigned pdirindex;
 
 		pdnxt = (sva + NBPDR) & ~PDRMASK;
+		if (pdnxt < sva)
+			pdnxt = eva;
 
 		pdirindex = sva >> PDRSHIFT;
 		ptpaddr = pmap->pm_pdir[pdirindex];
@@ -2782,6 +2786,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 		    ("pmap_copy: invalid to pmap_copy page tables"));
 
 		pdnxt = (addr + NBPDR) & ~PDRMASK;
+		if (pdnxt < addr)
+			pdnxt = end_addr;
 
 		ptepindex = addr >> PDRSHIFT;
 		srcptepaddr = src_pmap->pm_pdir[ptepindex];
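
For reference, the arithmetic behind the fix can be demonstrated outside the
kernel. The short C program below is only an illustrative sketch, not part of
the patch: it hard-codes NBPDR/PDRMASK to the amd64 2MB page-directory
geometry, picks made-up sva/eva values near the top of a 64-bit address space,
and reuses the variable names from pmap_remove(). Once sva lies in the last
page-directory-sized region, sva + NBPDR wraps past zero, the unclamped
va_next rounds down to 0, and the caller's loop would continue from the bottom
of the address space, which is the corruption described in the commit message.

/*
 * Illustrative sketch, not part of the patch: demonstrates the address
 * wraparound that the added "if (va_next < sva) va_next = eva;" checks
 * prevent.  NBPDR/PDRMASK below mirror the amd64 2MB page-directory
 * geometry; the sample sva/eva values are made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define	NBPDR	(1ULL << 21)	/* bytes mapped by one page-directory entry */
#define	PDRMASK	(NBPDR - 1)

int
main(void)
{
	/* An address inside the last 2MB region of a 64-bit address space. */
	uint64_t sva = 0xffffffffffe00123ULL;
	/* End of the range being operated on (top of the address space). */
	uint64_t eva = 0xffffffffffffffffULL;

	/*
	 * Unpatched calculation: sva + NBPDR wraps past zero, so the
	 * rounded-down result is 0 instead of an address above sva.
	 */
	uint64_t va_next = (sva + NBPDR) & ~PDRMASK;
	printf("unclamped va_next = %#jx\n", (uintmax_t)va_next);

	/*
	 * Patched calculation: a wrapped result is always smaller than
	 * sva, so clamp it to eva and let the caller's loop terminate.
	 */
	if (va_next < sva)
		va_next = eva;
	printf("clamped   va_next = %#jx\n", (uintmax_t)va_next);

	return (0);
}

The "va_next < sva" comparison is a reliable wraparound detector here: without
overflow the computed boundary is always strictly greater than sva, while a
wrapped (unsigned, modulo 2^64) sum rounds down to zero, so clamping to eva
ends the walk at the intended end of the range.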