diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 0f9cdb5dc628..fee10b5ac13b 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -308,6 +308,13 @@ options 	GDB
 #
 options 	SYSCTL_DEBUG
 
+#
+# DEBUG_MEMGUARD builds and enables memguard(9), a replacement allocator
+# for the kernel used to detect modify-after-free scenarios.  See the
+# memguard(9) man page for more information on usage.
+#
+options 	DEBUG_MEMGUARD
+
 #
 # KTRACE enables the system-call tracing facility ktrace(2).  To be more
 # SMP-friendly, KTRACE uses a worker thread to process most trace events
diff --git a/sys/conf/files b/sys/conf/files
index 95c50113a5b3..90a8d489cc2d 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1702,6 +1702,7 @@ vm/swap_pager.c		standard
 vm/uma_core.c		standard
 vm/uma_dbg.c		standard
 vm/vm_contig.c		standard
+vm/memguard.c		optional DEBUG_MEMGUARD
 vm/vm_fault.c		standard
 vm/vm_glue.c		standard
 vm/vm_init.c		standard
diff --git a/sys/conf/options b/sys/conf/options
index 8398bd757301..1c9ec6e67acb 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -515,6 +515,9 @@ PQ_LARGECACHE	opt_vmpage.h
 PQ_HUGECACHE	opt_vmpage.h
 PQ_CACHESIZE	opt_vmpage.h
 
+# The MemGuard replacement allocator used for tamper-after-free detection
+DEBUG_MEMGUARD	opt_vm.h
+
 # Standard SMP options
 SMP		opt_global.h
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index e794ec08e7a3..a972854a676b 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -58,6 +58,10 @@ __FBSDID("$FreeBSD$");
 #include <vm/uma_int.h>
 #include <vm/uma_dbg.h>
 
+#ifdef DEBUG_MEMGUARD
+#include <vm/memguard.h>
+#endif
+
 #if defined(INVARIANTS) && defined(__i386__)
 #include <machine/cpu.h>
 #endif
@@ -129,6 +133,12 @@ struct {
 	{0, NULL},
 };
 
+#ifdef DEBUG_MEMGUARD
+u_int vm_memguard_divisor;
+SYSCTL_UINT(_vm, OID_AUTO, memguard_divisor, CTLFLAG_RD, &vm_memguard_divisor,
+    0, "(kmem_size/memguard_divisor) == memguard submap size");
+#endif
+
 u_int vm_kmem_size;
 SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
     "Size of kernel memory");
@@ -280,6 +290,13 @@ malloc(size, type, flags)
 	if (flags & M_WAITOK)
 		KASSERT(curthread->td_intr_nesting_level == 0,
 		    ("malloc(M_WAITOK) in interrupt context"));
+
+#ifdef DEBUG_MEMGUARD
+	/* XXX CHANGEME! */
+	if (type == M_SUBPROC)
+		return memguard_alloc(size, flags);
+#endif
+
 	if (size <= KMEM_ZMAX) {
 		if (size & KMEM_ZMASK)
 			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
@@ -331,6 +348,14 @@ free(addr, type)
 	if (addr == NULL)
 		return;
 
+#ifdef DEBUG_MEMGUARD
+	/* XXX CHANGEME! */
+	if (type == M_SUBPROC) {
+		memguard_free(addr);
+		return;
+	}
+#endif
+
 	KASSERT(type->ks_memuse > 0,
 	    ("malloc(9)/free(9) confusion.\n%s",
 	    "Probably freeing with wrong type, but maybe not here."));
@@ -389,6 +414,14 @@ realloc(addr, size, type, flags)
 	if (addr == NULL)
 		return (malloc(size, type, flags));
 
+#ifdef DEBUG_MEMGUARD
+/* XXX: CHANGEME! */
+if (type == M_SUBPROC) {
+	slab = NULL;
+	alloc = size;
+} else {
+#endif
+
 	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
 
 	/* Sanity check */
@@ -406,6 +439,10 @@
 	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
 		return (addr);
 
+#ifdef DEBUG_MEMGUARD
+}
+#endif
+
 	/* Allocate a new, bigger (or smaller) block */
 	if ((newaddr = malloc(size, type, flags)) == NULL)
 		return (NULL);
@@ -502,6 +539,22 @@ kmeminit(dummy)
 	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
 	kmem_map->system_map = 1;
 
+#ifdef DEBUG_MEMGUARD
+	/*
+	 * Initialize MemGuard if support compiled in.  MemGuard is a
+	 * replacement allocator used for detecting tamper-after-free
+	 * scenarios as they occur.  It is only used for debugging.
+	 */
+	vm_memguard_divisor = 10;
+	TUNABLE_INT_FETCH("vm.memguard_divisor", &vm_memguard_divisor);
+
+	/* Pick a conservative value if provided value sucks. */
+	if ((vm_memguard_divisor <= 0) ||
+	    ((vm_kmem_size / vm_memguard_divisor) == 0))
+		vm_memguard_divisor = 10;
+	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
+#endif
+
 	uma_startup2();
 
 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c
new file mode 100644
index 000000000000..3f471b82fe3e
--- /dev/null
+++ b/sys/vm/memguard.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2005,
+ *	Bosko Milekic
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * MemGuard is a simple replacement allocator for debugging only
+ * which provides ElectricFence-style memory barrier protection on
+ * objects being allocated, and is used to detect tampering-after-free
+ * scenarios.
+ *
+ * See the memguard(9) man page for more information on using MemGuard.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+#include <vm/memguard.h>
+
+/*
+ * Global MemGuard data.
+ */
+static vm_map_t memguard_map;
+static unsigned long memguard_mapsize;
+static unsigned long memguard_mapused;
+struct memguard_entry {
+	STAILQ_ENTRY(memguard_entry) entries;
+	void *ptr;
+};
+static STAILQ_HEAD(memguard_fifo, memguard_entry) memguard_fifo_pool;
+
+/*
+ * Local prototypes.
+ */
+static void memguard_guard(void *addr);
+static void memguard_unguard(void *addr);
+
+/*
+ * Local macros.  MemGuard data is global, so replace these with whatever
+ * your system uses to protect global data (if it is kernel-level
+ * parallelized).  This is for porting among BSDs.
+ */
+#define MEMGUARD_CRIT_SECTION_DECLARE	static struct mtx memguard_mtx
+#define MEMGUARD_CRIT_SECTION_INIT					\
+	mtx_init(&memguard_mtx, "MemGuard mtx", NULL, MTX_DEF)
+#define MEMGUARD_CRIT_SECTION_ENTER	mtx_lock(&memguard_mtx)
+#define MEMGUARD_CRIT_SECTION_EXIT	mtx_unlock(&memguard_mtx)
+MEMGUARD_CRIT_SECTION_DECLARE;
+
+/*
+ * Initialize the MemGuard mock allocator.  All objects from MemGuard come
+ * out of a single VM map (contiguous chunk of address space).
+ */
+void
+memguard_init(vm_map_t parent_map, unsigned long size)
+{
+	char *base, *limit;
+
+	/* size must be multiple of PAGE_SIZE */
+	size /= PAGE_SIZE;
+	size++;
+	size *= PAGE_SIZE;
+
+	memguard_map = kmem_suballoc(parent_map, (vm_offset_t *)&base,
+	    (vm_offset_t *)&limit, (vm_size_t)size);
+	memguard_map->system_map = 1;
+	memguard_mapsize = size;
+	memguard_mapused = 0;
+
+	MEMGUARD_CRIT_SECTION_INIT;
+	MEMGUARD_CRIT_SECTION_ENTER;
+	STAILQ_INIT(&memguard_fifo_pool);
+	MEMGUARD_CRIT_SECTION_EXIT;
+
+	printf("MEMGUARD DEBUGGING ALLOCATOR INITIALIZED:\n");
+	printf("\tMEMGUARD map base: %p\n", base);
+	printf("\tMEMGUARD map limit: %p\n", limit);
+	printf("\tMEMGUARD map size: %ld (Bytes)\n", size);
+}
+
+/*
+ * Allocate a single object of specified size with specified flags (either
+ * M_WAITOK or M_NOWAIT).
+ */
+void *
+memguard_alloc(unsigned long size, int flags)
+{
+	void *obj = NULL;
+	struct memguard_entry *e = NULL;
+
+	/* XXX: MemGuard does not handle > PAGE_SIZE objects. */
+	if (size > PAGE_SIZE)
+		panic("MEMGUARD: Cannot handle objects > PAGE_SIZE");
+
+	/*
+	 * If we haven't exhausted the memguard_map yet, allocate from
+	 * it and grab a new page, even if we have recycled pages in our
+	 * FIFO.  This is because we wish to allow recycled pages to live
+	 * guarded in the FIFO for as long as possible in order to catch
+	 * even very late tamper-after-frees, even though it means that
+	 * we end up wasting more memory; this is only a DEBUGGING
+	 * allocator, after all.
+	 */
+	MEMGUARD_CRIT_SECTION_ENTER;
+	if (memguard_mapused >= memguard_mapsize) {
+		e = STAILQ_FIRST(&memguard_fifo_pool);
+		if (e != NULL) {
+			STAILQ_REMOVE(&memguard_fifo_pool, e,
+			    memguard_entry, entries);
+			MEMGUARD_CRIT_SECTION_EXIT;
+			obj = e->ptr;
+			free(e, M_TEMP);
+			memguard_unguard(obj);
+			if (flags & M_ZERO)
+				bzero(obj, PAGE_SIZE);
+			return obj;
+		}
+		MEMGUARD_CRIT_SECTION_EXIT;
+		if (flags & M_WAITOK)
+			panic("MEMGUARD: Failed with M_WAITOK: " \
+			    "memguard_map too small");
+		return NULL;
+	} else
+		memguard_mapused += PAGE_SIZE;
+	MEMGUARD_CRIT_SECTION_EXIT;
+
+	if (obj == NULL)
+		obj = (void *)kmem_malloc(memguard_map, PAGE_SIZE, flags);
+	if (obj != NULL) {
+		memguard_unguard(obj);
+		if (flags & M_ZERO)
+			bzero(obj, PAGE_SIZE);
+	} else {
+		MEMGUARD_CRIT_SECTION_ENTER;
+		memguard_mapused -= PAGE_SIZE;
+		MEMGUARD_CRIT_SECTION_EXIT;
+	}
+	return obj;
+}
+
+/*
+ * Free specified single object.
+ */
+void
+memguard_free(void *addr)
+{
+	struct memguard_entry *e;
+
+	memguard_guard(addr);
+	e = malloc(sizeof(struct memguard_entry), M_TEMP, M_NOWAIT);
+	if (e == NULL) {
+		MEMGUARD_CRIT_SECTION_ENTER;
+		memguard_mapused -= PAGE_SIZE;
+		MEMGUARD_CRIT_SECTION_EXIT;
+		kmem_free(memguard_map, (vm_offset_t)round_page(
+		    (unsigned long)addr), PAGE_SIZE);
+		return;
+	}
+	e->ptr = (void *)round_page((unsigned long)addr);
+	MEMGUARD_CRIT_SECTION_ENTER;
+	STAILQ_INSERT_TAIL(&memguard_fifo_pool, e, entries);
+	MEMGUARD_CRIT_SECTION_EXIT;
+}
+
+/*
+ * Guard a page containing specified object (make it read-only so that
+ * future writes to it fail).
+ */
+static void
+memguard_guard(void *addr)
+{
+	void *a = (void *)round_page((unsigned long)addr);
+	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
+	    (vm_offset_t)((unsigned long)a + PAGE_SIZE), VM_PROT_READ, 0);
+}
+
+/*
+ * Unguard a page containing specified object (make it read-and-write to
+ * allow full data access).
+ */
+static void
+memguard_unguard(void *addr)
+{
+	void *a = (void *)round_page((unsigned long)addr);
+	(void)vm_map_protect(memguard_map, (vm_offset_t)a,
+	    (vm_offset_t)((unsigned long)a + PAGE_SIZE),
+	    VM_PROT_READ | VM_PROT_WRITE, 0);
+}
diff --git a/sys/vm/memguard.h b/sys/vm/memguard.h
new file mode 100644
index 000000000000..46f0460b2d10
--- /dev/null
+++ b/sys/vm/memguard.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2005,
+ *	Bosko Milekic
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+void	memguard_init(vm_map_t parent_map, unsigned long size);
+void	*memguard_alloc(unsigned long size, int flags);
+void	memguard_free(void *addr);
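
Usage sketch (not part of the patch): with this change in place, MemGuard is enabled by building a kernel with the new DEBUG_MEMGUARD option, and the size of its submap can be set from the loader through the vm.memguard_divisor tunable fetched in kmeminit(); the divisor value of 5 below is only an illustration, the default being 10. The malloc(9) type being watched is whichever type the two "XXX CHANGEME" comparisons in kern_malloc.c test against (M_SUBPROC as committed), so monitoring a different type means editing those comparisons and rebuilding.

	# kernel configuration file
	options 	DEBUG_MEMGUARD

	# /boot/loader.conf -- size the MemGuard submap as kmem_size/5
	# instead of the default kmem_size/10 (illustrative value)
	vm.memguard_divisor="5"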