From: Manfred Spraul

- do not use xchg for the redzone test.  It's bogus - the current value is
  important, it must be dumped together with the error message: single bit
  change, or another value, or _ACTIVE instead of _INACTIVE.

- abstract the location of the redzone words into separate functions.
  Simplifies the code quite a bit.

- if an object is aligned due to DEBUG_PAGEALLOC, move it to the end of the
  page.

- properly limit DEBUG_PAGEALLOC - right now it won't work on systems with
  cacheline sizes > 128 bytes.


 mm/slab.c |  192 +++++++++++++++++++++++++++++++-------------------------------
 1 files changed, 99 insertions(+), 93 deletions(-)

diff -puN mm/slab.c~slab-debug-updates mm/slab.c
--- 25/mm/slab.c~slab-debug-updates	2003-08-06 14:05:47.000000000 -0700
+++ 25-akpm/mm/slab.c	2003-08-06 14:05:47.000000000 -0700
@@ -293,6 +293,10 @@ struct kmem_cache_s {
 	atomic_t freehit;
 	atomic_t freemiss;
 #endif
+#if DEBUG
+	int dbghead;
+	int reallen;
+#endif
 };
 
 #define CFLGS_OFF_SLAB		(0x80000000UL)
@@ -356,33 +360,54 @@ struct kmem_cache_s {
 #define	POISON_AFTER	0x6b	/* for use-after-free poisoning */
 #define	POISON_END	0xa5	/* end-byte of poisoning */
 
+/* memory layout of objects:
+ * 0		: objp
+ * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 		the end of an object is aligned with the end of the real
+ * 		allocation. Catches writes behind the end of the allocation.
+ * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * 		redzone word.
+ * cachep->dbghead: The real object.
+ * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ */
 static inline int obj_dbghead(kmem_cache_t *cachep)
 {
-	if (cachep->flags & SLAB_RED_ZONE)
-		return BYTES_PER_WORD;
-	return 0;
+	return cachep->dbghead;
 }
 
-static inline int obj_dbglen(kmem_cache_t *cachep)
+static inline int obj_reallen(kmem_cache_t *cachep)
 {
-	int len = 0;
+	return cachep->reallen;
+}
 
-	if (cachep->flags & SLAB_RED_ZONE) {
-		len += 2*BYTES_PER_WORD;
-	}
-	if (cachep->flags & SLAB_STORE_USER) {
-		len += BYTES_PER_WORD;
-	}
-	return len;
+static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
+	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+}
+
+static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
+	if (cachep->flags & SLAB_STORE_USER)
+		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
+	return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+}
+
+static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
+	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
 }
 #else
 static inline int obj_dbghead(kmem_cache_t *cachep)
 {
 	return 0;
 }
-static inline int obj_dbglen(kmem_cache_t *cachep)
+static inline int obj_reallen(kmem_cache_t *cachep)
 {
-	return 0;
+	return cachep->objsize;
 }
 #endif
 
@@ -803,7 +828,7 @@ static inline void kmem_freepages (kmem_
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, unsigned long caller)
 {
-	int size = cachep->objsize-obj_dbglen(cachep);
+	int size = obj_reallen(cachep);
 
 	addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
 
@@ -835,7 +860,7 @@ static void store_stackinfo(kmem_cache_t
 
 static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
 {
-	int size = cachep->objsize-obj_dbglen(cachep);
+	int size = obj_reallen(cachep);
 
 	addr = &((char*)addr)[obj_dbghead(cachep)];
 	memset(addr, val, size);
@@ -857,47 +882,42 @@ static void *scan_poisoned_obj(unsigned
 	return NULL;
 }
 
-static void check_poison_obj(kmem_cache_t *cachep, void *addr)
+static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 {
 	void *end;
-	int size = cachep->objsize-obj_dbglen(cachep);
+	void *realobj;
+	int size = obj_reallen(cachep);
 
-	addr = &((char*)addr)[obj_dbghead(cachep)];
+	realobj = objp+obj_dbghead(cachep);
 
-	end = scan_poisoned_obj(addr, size);
+	end = scan_poisoned_obj(realobj, size);
 	if (end) {
 		int s;
 		printk(KERN_ERR "Slab corruption: start=%p, expend=%p, "
-				"problemat=%p\n", addr, addr+size-1, end);
+				"problemat=%p\n", realobj, realobj+size-1, end);
 		if (cachep->flags & SLAB_STORE_USER) {
-			void *pc;
-
-			if (cachep->flags & SLAB_RED_ZONE)
-				pc = *(void**)(addr+size+BYTES_PER_WORD);
-			else
-				pc = *(void**)(addr+size);
-			printk(KERN_ERR "Last user: [<%p>]", pc);
-			print_symbol("(%s)", (unsigned long)pc);
+			printk(KERN_ERR "Last user: [<%p>]", *dbg_userword(cachep, objp));
+			print_symbol("(%s)", (unsigned long)*dbg_userword(cachep, objp));
 			printk("\n");
 		}
 		printk(KERN_ERR "Data: ");
 		for (s = 0; s < size; s++) {
-			if (((char*)addr)[s] == POISON_BEFORE)
+			if (((char*)realobj)[s] == POISON_BEFORE)
 				printk(".");
-			else if (((char*)addr)[s] == POISON_AFTER)
+			else if (((char*)realobj)[s] == POISON_AFTER)
 				printk("*");
 			else
-				printk("%02X ", ((unsigned char*)addr)[s]);
+				printk("%02X ", ((unsigned char*)realobj)[s]);
 		}
 		printk("\n");
 		printk(KERN_ERR "Next: ");
 		for (; s < size + 32; s++) {
-			if (((char*)addr)[s] == POISON_BEFORE)
+			if (((char*)realobj)[s] == POISON_BEFORE)
 				printk(".");
-			else if (((char*)addr)[s] == POISON_AFTER)
+			else if (((char*)realobj)[s] == POISON_AFTER)
 				printk("*");
 			else
-				printk("%02X ", ((unsigned char*)addr)[s]);
+				printk("%02X ", ((unsigned char*)realobj)[s]);
 		}
 		printk("\n");
 		slab_error(cachep, "object was modified after freeing");
@@ -915,7 +935,6 @@ static void slab_destroy (kmem_cache_t *
 	int i;
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = slabp->s_mem + cachep->objsize * i;
-		int objlen = cachep->objsize;
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -927,21 +946,16 @@ static void slab_destroy (kmem_cache_t *
 			check_poison_obj(cachep, objp);
 #endif
 		}
-		if (cachep->flags & SLAB_STORE_USER)
-			objlen -= BYTES_PER_WORD;
-
 		if (cachep->flags & SLAB_RED_ZONE) {
-			if (*((unsigned long*)(objp)) != RED_INACTIVE)
+			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "start of a freed object "
 							"was overwritten");
-			if (*((unsigned long*)(objp + objlen - BYTES_PER_WORD))
-					!= RED_INACTIVE)
+			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "end of a freed object "
 							"was overwritten");
-			objp += BYTES_PER_WORD;
 		}
 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor)(objp, cachep, 0);
+			(cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
 	}
 #else
 	if (cachep->dtor) {
@@ -1019,10 +1033,6 @@ kmem_cache_create (const char *name, siz
 	}
 
 #if FORCED_DEBUG
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	if (size < PAGE_SIZE-3*BYTES_PER_WORD && size > 128)
-		size = PAGE_SIZE-3*BYTES_PER_WORD;
-#endif
 	/*
 	 * Enable redzoning and last user accounting, except
 	 * - for caches with forced alignment: redzoning would violate the
@@ -1053,6 +1063,9 @@ kmem_cache_create (const char *name, siz
 		goto opps;
 	memset(cachep, 0, sizeof(kmem_cache_t));
 
+#if DEBUG
+	cachep->reallen = size;
+#endif
 	/* Check that size is in terms of words.  This is needed to avoid
 	 * unaligned accesses for some archs when redzoning is used, and makes
 	 * sure any on-slab bufctl's are also correctly aligned.
@@ -1070,13 +1083,21 @@ kmem_cache_create (const char *name, siz
 		 * when redzoning.
 		 */
 		flags &= ~SLAB_HWCACHE_ALIGN;
-		size += 2*BYTES_PER_WORD;	/* words for redzone */
+		/* add space for red zone words */
+		cachep->dbghead += BYTES_PER_WORD;
+		size += 2*BYTES_PER_WORD;
 	}
 	if (flags & SLAB_STORE_USER) {
 		flags &= ~SLAB_HWCACHE_ALIGN;
-		size += BYTES_PER_WORD;		/* word for kfree caller address */
+		size += BYTES_PER_WORD;	/* add space */
+	}
+#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+	if (size > 128 && cachep->reallen > L1_CACHE_BYTES && size < PAGE_SIZE) {
+		cachep->dbghead += PAGE_SIZE - size;
+		size = PAGE_SIZE;
 	}
 #endif
+#endif
 	align = BYTES_PER_WORD;
 	if (flags & SLAB_HWCACHE_ALIGN)
 		align = L1_CACHE_BYTES;
@@ -1443,20 +1464,15 @@ static void cache_init_objs (kmem_cache_
 	for (i = 0; i < cachep->num; i++) {
 		void* objp = slabp->s_mem+cachep->objsize*i;
 #if DEBUG
-		int objlen = cachep->objsize;
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
 			poison_obj(cachep, objp, POISON_BEFORE);
-		if (cachep->flags & SLAB_STORE_USER) {
-			objlen -= BYTES_PER_WORD;
-			((unsigned long*)(objp+objlen))[0] = 0;
-		}
+		if (cachep->flags & SLAB_STORE_USER)
+			*dbg_userword(cachep, objp) = NULL;
 
 		if (cachep->flags & SLAB_RED_ZONE) {
-			*((unsigned long*)(objp)) = RED_INACTIVE;
-			objp += BYTES_PER_WORD;
-			objlen -= 2* BYTES_PER_WORD;
-			*((unsigned long*)(objp + objlen)) = RED_INACTIVE;
+			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
+			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 		}
 		/*
 		 * Constructors are not allowed to allocate memory from
@@ -1464,14 +1480,13 @@ static void cache_init_objs (kmem_cache_
 		 * Otherwise, deadlock. They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp, cachep, ctor_flags);
+			cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
 
 		if (cachep->flags & SLAB_RED_ZONE) {
-			if (*((unsigned long*)(objp + objlen)) != RED_INACTIVE)
+			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "constructor overwrote the"
 							" end of an object");
-			objp -= BYTES_PER_WORD;
-			if (*((unsigned long*)(objp)) != RED_INACTIVE)
+			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "constructor overwrote the"
 							" start of an object");
 		}
@@ -1622,9 +1637,9 @@ static inline void *cache_free_debugchec
 #if DEBUG
 	struct page *page;
 	unsigned int objnr;
-	int objlen = cachep->objsize;
 	struct slab *slabp;
 
+	objp -= obj_dbghead(cachep);
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
@@ -1637,21 +1652,18 @@ static inline void *cache_free_debugchec
 	}
 	slabp = GET_PAGE_SLAB(page);
 
-	if (cachep->flags & SLAB_STORE_USER) {
-		objlen -= BYTES_PER_WORD;
-	}
 	if (cachep->flags & SLAB_RED_ZONE) {
-		objp -= BYTES_PER_WORD;
-		if (xchg((unsigned long *)objp, RED_INACTIVE) != RED_ACTIVE)
-			slab_error(cachep, "double free, or memory before"
+		if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+			slab_error(cachep, "double free, or memory outside"
 						" object was overwritten");
-		if (xchg((unsigned long *)(objp+objlen-BYTES_PER_WORD), RED_INACTIVE) != RED_ACTIVE)
-			slab_error(cachep, "double free, or memory after "
-						" object was overwritten");
-	}
-	if (cachep->flags & SLAB_STORE_USER) {
-		*((void**)(objp+objlen)) = caller;
+			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+		}
+		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
+		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
+	if (cachep->flags & SLAB_STORE_USER)
+		*dbg_userword(cachep, objp) = caller;
 
 	objnr = (objp-slabp->s_mem)/cachep->objsize;
 
@@ -1825,8 +1837,6 @@ cache_alloc_debugcheck_after(kmem_cache_
 			unsigned long flags, void *objp, void *caller)
 {
 #if DEBUG
-	int objlen = cachep->objsize;
-
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
@@ -1840,24 +1850,20 @@ cache_alloc_debugcheck_after(kmem_cache_
 #endif
 		poison_obj(cachep, objp, POISON_BEFORE);
 	}
-	if (cachep->flags & SLAB_STORE_USER) {
-		objlen -= BYTES_PER_WORD;
-		*((void **)(objp+objlen)) = caller;
-	}
+	if (cachep->flags & SLAB_STORE_USER)
+		*dbg_userword(cachep, objp) = caller;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
-		/* Set alloc red-zone, and check old one. */
-		if (xchg((unsigned long *)objp, RED_ACTIVE) != RED_INACTIVE) {
-			slab_error(cachep, "memory before object was "
-					"overwritten");
-		}
-		if (xchg((unsigned long *)(objp+objlen - BYTES_PER_WORD),
-				RED_ACTIVE) != RED_INACTIVE) {
-			slab_error(cachep, "memory after object was "
-					"overwritten");
+		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+			slab_error(cachep, "double free, or memory outside"
+					" object was overwritten");
+			printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+					objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
 		}
-		objp += BYTES_PER_WORD;
+		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
+		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+	objp += obj_dbghead(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 
@@ -2175,7 +2181,7 @@ free_percpu(const void *objp)
 
 unsigned int kmem_cache_size(kmem_cache_t *cachep)
 {
-	return cachep->objsize-obj_dbglen(cachep);
+	return obj_reallen(cachep);
}
 
 kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
_
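For readers following the new "memory layout of objects" comment, here is a
minimal userspace sketch of the same offset arithmetic.  It is illustrative
only and not part of the patch: the mock_cache struct, the MOCK_* flag values
and the 64-byte example are assumptions made for the demo; only the offset
formulas mirror dbg_redzone1()/dbg_redzone2() and the caller-word position
used by dbg_userword().

/*
 * Illustrative sketch (assumptions noted above) -- compiles as plain C,
 * no kernel headers involved.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define BYTES_PER_WORD	sizeof(void *)
#define MOCK_RED_ZONE	0x01	/* stand-ins for SLAB_RED_ZONE / SLAB_STORE_USER */
#define MOCK_STORE_USER	0x02

struct mock_cache {
	unsigned long flags;
	size_t objsize;		/* total allocation, debug words included */
	size_t dbghead;		/* offset of the real object */
	size_t reallen;		/* size the caller asked for */
};

/* like dbg_redzone1(): the word just in front of the real object */
static unsigned long *redzone1(const struct mock_cache *c, void *objp)
{
	assert(c->flags & MOCK_RED_ZONE);
	return (unsigned long *)((char *)objp + c->dbghead - BYTES_PER_WORD);
}

/* like dbg_redzone2(): the word right behind the real object; it sits one
 * word earlier when a last-caller word is stored at the very end */
static unsigned long *redzone2(const struct mock_cache *c, void *objp)
{
	assert(c->flags & MOCK_RED_ZONE);
	if (c->flags & MOCK_STORE_USER)
		return (unsigned long *)((char *)objp + c->objsize - 2*BYTES_PER_WORD);
	return (unsigned long *)((char *)objp + c->objsize - BYTES_PER_WORD);
}

int main(void)
{
	/* a 64-byte object with both redzoning and caller tracking enabled */
	struct mock_cache c;
	unsigned char obj[64 + 3*sizeof(void *)];

	c.flags   = MOCK_RED_ZONE | MOCK_STORE_USER;
	c.reallen = 64;
	c.dbghead = BYTES_PER_WORD;		/* one redzone word in front */
	c.objsize = c.reallen + 3*BYTES_PER_WORD;

	printf("redzone1 at %zu, object at %zu..%zu, redzone2 at %zu, caller word at %zu\n",
	       (size_t)((char *)redzone1(&c, obj) - (char *)obj),
	       c.dbghead, c.dbghead + c.reallen - 1,
	       (size_t)((char *)redzone2(&c, obj) - (char *)obj),
	       c.objsize - BYTES_PER_WORD);
	return 0;
}

On a 64-bit machine this prints the first redzone at offset 0, the object at
8..71, the second redzone at 72 and the caller word at 80 - i.e. the layout
the comment block in the patch describes, with the object's end flush against
the end of the real allocation.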