From: Alexander Nyberg

This patch stores the last 3 function addresses of the call chain that
allocated the object (it won't work on certain large caches, though; those
fall back to the existing behaviour).

Signed-off-by: Alexander Nyberg
Signed-off-by: Andrew Morton
---

 mm/slab.c |   99 ++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 files changed, 74 insertions(+), 25 deletions(-)

diff -puN mm/slab.c~slab-leak-detector-give-longer-traces mm/slab.c
--- devel/mm/slab.c~slab-leak-detector-give-longer-traces	2005-08-06 15:35:04.000000000 -0700
+++ devel-akpm/mm/slab.c	2005-08-06 15:35:04.000000000 -0700
@@ -521,16 +521,28 @@ static unsigned long *dbg_redzone2(kmem_
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
+		return (unsigned long*) (objp+cachep->objsize-4*BYTES_PER_WORD);
 	return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
 }
 
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword3(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
 }
 
+static void **dbg_userword2(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
+	return (void**)(objp+cachep->objsize-2*BYTES_PER_WORD);
+}
+
+static void **dbg_userword1(kmem_cache_t *cachep, void *objp)
+{
+	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
+	return (void**)(objp+cachep->objsize-3*BYTES_PER_WORD);
+}
+
 #else
 
 #define obj_dbghead(x)	0
@@ -1312,12 +1324,16 @@ static void print_objinfo(kmem_cache_t *
 	}
 
 	if (cachep->flags & SLAB_STORE_USER) {
-		printk(KERN_ERR "Last user: [<%p>]",
-				*dbg_userword(cachep, objp));
-		print_symbol("(%s)",
-				(unsigned long)*dbg_userword(cachep, objp));
+		printk(KERN_ERR "Last user:\n");
+		printk(KERN_ERR "[<%p>]", *dbg_userword1(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword1(cachep, objp));
+		printk(KERN_ERR "[<%p>]", *dbg_userword2(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword2(cachep, objp));
+		printk(KERN_ERR "[<%p>]", *dbg_userword3(cachep, objp));
+		print_symbol("(%s)", (unsigned long) *dbg_userword3(cachep, objp));
 		printk("\n");
 	}
+
 	realobj = (char*)objp+obj_dbghead(cachep);
 	size = obj_reallen(cachep);
 	for (i=0; i<size && lines; i+=16, lines--) {
= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
@@ -2081,8 +2097,11 @@ static void cache_init_objs(kmem_cache_t
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
 			poison_obj(cachep, objp, POISON_FREE);
-		if (cachep->flags & SLAB_STORE_USER)
-			*dbg_userword(cachep, objp) = NULL;
+		if (cachep->flags & SLAB_STORE_USER) {
+			*dbg_userword1(cachep, objp) = NULL;
+			*dbg_userword2(cachep, objp) = NULL;
+			*dbg_userword3(cachep, objp) = NULL;
+		}
 
 		if (cachep->flags & SLAB_RED_ZONE) {
 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
@@ -2256,7 +2275,7 @@ static void kfree_debugcheck(const void
 	}
 }
 
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void inline *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 					void *caller)
 {
 	struct page *page;
@@ -2286,8 +2305,11 @@ static void *cache_free_debugcheck(kmem_
 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 	}
-	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+	if (cachep->flags & SLAB_STORE_USER) {
+		*dbg_userword1(cachep, objp) = caller;	/* address(0) */
+		*dbg_userword2(cachep, objp) = __builtin_return_address(1);
+		*dbg_userword3(cachep, objp) = __builtin_return_address(2);
+	}
 
 	objnr = (objp-slabp->s_mem)/cachep->objsize;
 
@@ -2462,7 +2484,7 @@ cache_alloc_debugcheck_before(kmem_cache
 }
 
 #if DEBUG
-static void *
+static void inline *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 			unsigned int __nocast flags, void *objp, void *caller)
 {
@@ -2479,8 +2501,11 @@ cache_alloc_debugcheck_after(kmem_cache_
 #endif
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
-	if (cachep->flags & SLAB_STORE_USER)
-		*dbg_userword(cachep, objp) = caller;
+	if (cachep->flags & SLAB_STORE_USER) {
+		*dbg_userword1(cachep, objp) = caller;	/* address(0) */
+		*dbg_userword2(cachep, objp) = __builtin_return_address(1);
+		*dbg_userword3(cachep, objp) = __builtin_return_address(2);
+	}
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
@@ -3529,6 +3554,37 @@ struct seq_operations slabinfo_op = {
 	.show = s_show,
 };
 
+#if DEBUG
+#include
+static inline void dump_slab(kmem_cache_t *cachep, struct slab *slabp)
+{
+	int i;
+	int slab_user = cachep->flags & SLAB_STORE_USER;
+
+	for (i = 0; i < cachep->num; i++) {
+		if (slab_user) {
+			void *objp = slabp->s_mem + cachep->objsize * i;
+
+			printk("obj:%p [%p] ", objp, *dbg_userword1(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword1(cachep, objp));
+			printk("\n");
+			printk("obj:%p [%p] ", objp, *dbg_userword2(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword2(cachep, objp));
+			printk("\n");
+			printk("obj:%p [%p] ", objp, *dbg_userword3(cachep, objp));
+			print_symbol("<%s>", (unsigned long) *dbg_userword3(cachep, objp));
+			printk("\n");
+		} else {
+			unsigned long sym = slab_bufctl(slabp)[i];
+
+			printk("obj %p/%d: %p", slabp, i, (void *)sym);
+			print_symbol(" <%s>", sym);
+			printk("\n");
+		}
+	}
+}
+#endif
+
 static void do_dump_slabp(kmem_cache_t *cachep)
 {
 #if DEBUG
@@ -3542,16 +3598,9 @@ static void do_dump_slabp(kmem_cache_t *
 		spin_lock(&rl3->list_lock);
 		list_for_each(q, &rl3->slabs_full) {
-			int i;
 			struct slab *slabp = list_entry(q, struct slab, list);
-
-			for (i = 0; i < cachep->num; i++) {
-				unsigned long sym = slab_bufctl(slabp)[i];
-
-				printk("obj %p/%d: %p", slabp, i, (void *)sym);
-				print_symbol(" <%s>", sym);
-				printk("\n");
-			}
+			dump_slab(cachep, slabp);
+			touch_nmi_watchdog();
 		}
 		spin_unlock(&rl3->list_lock);
 	}
_
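
For reference, here is a minimal userspace sketch (not kernel code) of the
__builtin_return_address() idea the patch relies on to record a three-entry
allocation call chain.  The leak_record/record_caller names exist only for
this illustration, and __builtin_return_address(N) with N > 0 is only
reliable with frame pointers (e.g. gcc -O0 -fno-omit-frame-pointer) on
architectures where walking the frames is safe:

#include <stdio.h>

struct leak_record {
	void *caller[3];		/* analogous to dbg_userword1..3 */
};

static struct leak_record rec;

/* noinline, so the builtin sees the real call chain, not an inlined copy */
static __attribute__((noinline)) void record_caller(void)
{
	rec.caller[0] = __builtin_return_address(0);	/* immediate caller */
	rec.caller[1] = __builtin_return_address(1);	/* caller's caller */
	rec.caller[2] = __builtin_return_address(2);	/* one frame further up */
}

static __attribute__((noinline)) void level2(void) { record_caller(); }
static __attribute__((noinline)) void level1(void) { level2(); }

int main(void)
{
	int i;

	level1();
	for (i = 0; i < 3; i++)
		printf("caller[%d] = %p\n", i, rec.caller[i]);
	return 0;
}

In the patch the same three slots live in the extra debug words behind the
object, which is presumably also why cache_free_debugcheck() and
cache_alloc_debugcheck_after() are marked inline: inlining them into their
callers shifts which stack frames __builtin_return_address(1) and (2) see.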