From: Manfred Spraul

slab.c contains too many inline functions:

- some functions that are not performance critical were inlined.  Waste of
  text size.
- The debug code relies on __builtin_return_address(0) to keep track of the
  callers.  According to rmk, gcc didn't inline some functions as expected,
  and that resulted in useless debug output.  This was probably caused by the
  large debug-only inline functions.

The attached patch removes most inline functions:

- the empty-on-release/huge-on-debug inline functions were replaced with
  empty macros on release and normal functions on debug.
- spurious inline statements were removed.

The code is down to 6 inline functions: three one-liners for struct
abstractions, one for a might_sleep_if test, and two for the performance
critical __cache_alloc / __cache_free functions.

Note: if an embedded arch wants to save a few bytes by uninlining
__cache_{free,alloc}, the right way to do that is to fold the functions into
kmem_cache_{free,alloc} and then replace kmalloc with
kmem_cache_alloc(kmem_find_general_cachep(),).

Signed-off-by: Manfred Spraul
Signed-off-by: Andrew Morton
---

 25-akpm/mm/slab.c |   84 +++++++++++++++++++++++-------------------------------
 1 files changed, 37 insertions(+), 47 deletions(-)

diff -puN mm/slab.c~reduce-function-inlining-in-slabc mm/slab.c
--- 25/mm/slab.c~reduce-function-inlining-in-slabc	2004-06-19 14:19:20.701121712 -0700
+++ 25-akpm/mm/slab.c	2004-06-19 14:19:20.710120344 -0700
@@ -384,12 +384,12 @@ struct kmem_cache_s {
  * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
  * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static inline int obj_dbghead(kmem_cache_t *cachep)
+static int obj_dbghead(kmem_cache_t *cachep)
 {
 	return cachep->dbghead;
 }
 
-static inline int obj_reallen(kmem_cache_t *cachep)
+static int obj_reallen(kmem_cache_t *cachep)
 {
 	return cachep->reallen;
 }
@@ -413,30 +413,15 @@ static void **dbg_userword(kmem_cache_t
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
 }
+
 #else
-static inline int obj_dbghead(kmem_cache_t *cachep)
-{
-	return 0;
-}
-static inline int obj_reallen(kmem_cache_t *cachep)
-{
-	return cachep->objsize;
-}
-static inline unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
-{
-	BUG();
-	return 0;
-}
-static inline unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
-{
-	BUG();
-	return 0;
-}
-static inline void **dbg_userword(kmem_cache_t *cachep, void *objp)
-{
-	BUG();
-	return 0;
-}
+
+#define obj_dbghead(x)			0
+#define obj_reallen(cachep)		(cachep->objsize)
+#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
+#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
+
 #endif
 
 /*
@@ -879,7 +864,7 @@ static void *kmem_getpages(kmem_cache_t
 /*
  * Interface to system's page release.
  */
-static inline void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 {
 	unsigned long i = (1<<cachep->gfporder);
 	struct page *page = virt_to_page(addr);
@@ -1413,27 +1398,29 @@ opps:
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
-static inline void check_irq_off(void)
-{
 #if DEBUG
+static void check_irq_off(void)
+{
 	BUG_ON(!irqs_disabled());
-#endif
 }
 
-static inline void check_irq_on(void)
+static void check_irq_on(void)
 {
-#if DEBUG
 	BUG_ON(irqs_disabled());
-#endif
 }
 
-static inline void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(kmem_cache_t *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
 	BUG_ON(spin_trylock(&cachep->spinlock));
 #endif
 }
+#else
+#define check_irq_off()	do { } while(0)
+#define check_irq_on()	do { } while(0)
+#define check_spinlock_acquired(x) do { } while(0)
+#endif
 
 /*
  * Waits for all CPUs to execute func().
@@ -1596,7 +1583,7 @@ int kmem_cache_destroy (kmem_cache_t * c
 EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
-static inline struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
+static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
 			void *objp, int colour_off, int local_flags)
 {
 	struct slab *slabp;
@@ -1779,15 +1766,16 @@ failed:
 	return 0;
 }
 
+#if DEBUG
+
 /*
  * Perform extra freeing checks:
  * - detect bad pointers.
  * - POISON/RED_ZONE checking
  * - destructor calls, for caches with POISON+dtor
  */
-static inline void kfree_debugcheck(const void *objp)
+static void kfree_debugcheck(const void *objp)
 {
-#if DEBUG
 	struct page *page;
 
 	if (!virt_addr_valid(objp)) {
@@ -1800,12 +1788,10 @@ static inline void kfree_debugcheck(cons
 		printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
 		BUG();
 	}
-#endif
 }
 
-static inline void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
+static void *cache_free_debugcheck (kmem_cache_t * cachep, void * objp, void *caller)
 {
-#if DEBUG
 	struct page *page;
 	unsigned int objnr;
 	struct slab *slabp;
@@ -1867,13 +1853,11 @@ static inline void *cache_free_debugchec
 			poison_obj(cachep, objp, POISON_FREE);
 #endif
 	}
-#endif
 	return objp;
 }
 
-static inline void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 {
-#if DEBUG
 	int i;
 	int entries = 0;
 
@@ -1897,8 +1881,12 @@ bad:
 	printk("\n");
 	BUG();
 	}
-#endif
 }
+#else
+#define kfree_debugcheck(x) do { } while(0)
+#define cache_free_debugcheck(x,objp,z) (objp)
+#define check_slabp(x,y) do { } while(0)
+#endif
 
 static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
 {
@@ -2005,11 +1993,11 @@ cache_alloc_debugcheck_before(kmem_cache
 #endif
 }
 
-static inline void *
+#if DEBUG
+static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 			unsigned long flags, void *objp, void *caller)
 {
-#if DEBUG
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
@@ -2045,9 +2033,11 @@ cache_alloc_debugcheck_after(kmem_cache_
 
 		cachep->ctor(objp, cachep, ctor_flags);
 	}
-#endif
 	return objp;
 }
+#else
+#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
+#endif
 
 static inline void *
 __cache_alloc (kmem_cache_t *cachep, int flags)
@@ -2693,7 +2683,7 @@ static void drain_array_locked(kmem_cach
 * If we cannot acquire the cache chain semaphore then just give up - we'll
 * try again next timer interrupt.
 */
-static inline void cache_reap (void)
+static void cache_reap (void)
 {
	struct list_head *walk;
 
_
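
For readers unfamiliar with the caller-tracking problem mentioned in the
description: the user-space sketch below is not part of the patch.  The names
recorded_caller, debug_record(), free_and_track() and buf_free() are
hypothetical stand-ins for slab's kfree() -> __cache_free() ->
cache_free_debugcheck() path; it only illustrates why a wrapper that gcc
declines to inline makes __builtin_return_address(0) record a useless
address.

/*
 * Illustrative sketch only -- not from mm/slab.c.  Build once with -O0 and
 * once with -O2 and compare the output: when gcc does not inline
 * free_and_track(), __builtin_return_address(0) is taken one frame too low
 * and the recorded "caller" points into buf_free() instead of at the real
 * call site in main().
 */
#include <stdio.h>

static void *recorded_caller;

static void debug_record(void *caller)
{
	recorded_caller = caller;	/* slab stores this next to the object */
}

static inline void free_and_track(void)
{
	/* only useful if this is evaluated in the real caller's frame */
	debug_record(__builtin_return_address(0));
}

void buf_free(void)
{
	free_and_track();
}

int main(void)
{
	buf_free();	/* the interesting caller is this line in main() */
	printf("recorded caller %p, main() at %p, buf_free() at %p\n",
	       recorded_caller, (void *)main, (void *)buf_free);
	return 0;
}

Once the address has been captured in the outermost wrapper and is passed
down as an explicit argument (as the caller parameter of
cache_free_debugcheck() is in the diff above), the debug helpers themselves
no longer need to be inline, which appears to be what lets the patch turn
them into plain functions under #if DEBUG.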