From: Manfred Spraul

slab.c is not very efficient at passing objects between cpus. Usually this
is a rare event, but with network routing and cpu-affine NICs it is possible
that nearly all allocation operations occur on one cpu and nearly all free
operations on another cpu. This causes slab memory to be returned all the
way to the page allocator instead of being cached in slab.

The attached patch solves that by adding an array of objects that is shared
by all cpus. Instead of multiple linked-list operations per object, object
pointers are now passed to/from the shared array (and thus between cpus)
with memcpy operations. On uniprocessor, the default array size is 0,
because the shared array and the per-cpu head array would be redundant.
(A simplified sketch of the transfer follows the patch.)

Additionally, the patch exports more statistics in /proc/slabinfo and makes
the array sizes tunable by writing to /proc/slabinfo. Both changes break
backward compatibility; user-space scripts must look at the slabinfo version
and act accordingly.

The size of the new shared array may be altered at runtime by writing to
/proc/slabinfo. The new format for writes to /proc/slabinfo is:

	# echo "cache-name limit batchcount shared" > /proc/slabinfo

For example, "size-4096 120 60 8" improves the slab efficiency for network
routing, because the default values (24 12 8) are too small for the long
allocation bursts caused by irq mitigation. Note that only root has write
permission to /proc/slabinfo.

 25-akpm/mm/slab.c |  182 +++++++++++++++++++++++++++++++++++++++++-------------
 1 files changed, 139 insertions(+), 43 deletions(-)

diff -puN mm/slab.c~slab-magazine-layer mm/slab.c
--- 25/mm/slab.c~slab-magazine-layer	Tue May 27 13:40:55 2003
+++ 25-akpm/mm/slab.c	Tue May 27 14:05:47 2003
@@ -210,6 +210,7 @@ struct kmem_list3 {
 	unsigned long	free_objects;
 	int		free_touched;
 	unsigned long	next_reap;
+	struct array_cache	*shared;
 };
 
 #define LIST3_INIT(parent) \
@@ -1209,6 +1210,7 @@ static void smp_call_function_all_cpus(v
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len);
+static void drain_array_locked(kmem_cache_t* cachep, struct array_cache *ac);
 
 static void do_drain(void *arg)
 {
@@ -1217,13 +1219,20 @@ static void do_drain(void *arg)
 	check_irq_off();
 	ac = ac_data(cachep);
+	spin_lock(&cachep->spinlock);
 	free_block(cachep, &ac_entry(ac)[0], ac->avail);
+	spin_unlock(&cachep->spinlock);
 	ac->avail = 0;
 }
 
 static void drain_cpu_caches(kmem_cache_t *cachep)
 {
 	smp_call_function_all_cpus(do_drain, cachep);
+	check_irq_on();
+	spin_lock_irq(&cachep->spinlock);
+	if (cachep->lists.shared)
+		drain_array_locked(cachep, cachep->lists.shared);
+	spin_unlock_irq(&cachep->spinlock);
 }
 
@@ -1321,6 +1330,8 @@ int kmem_cache_destroy (kmem_cache_t * c
 		for (i = 0; i < NR_CPUS; i++)
 			kfree(cachep->array[i]);
 		/* NUMA: free the list3 structures */
+		kfree(cachep->lists.shared);
+		cachep->lists.shared = NULL;
 	}
 	kmem_cache_free(&cache_cache, cachep);
 
@@ -1663,6 +1674,19 @@ retry:
 	BUG_ON(ac->avail > 0);
 	spin_lock(&cachep->spinlock);
+	if (l3->shared) {
+		struct array_cache *shared_array = l3->shared;
+		if (shared_array->avail) {
+			if (batchcount > shared_array->avail)
+				batchcount = shared_array->avail;
+			shared_array->avail -= batchcount;
+			ac->avail = batchcount;
+			memcpy(ac_entry(ac), &ac_entry(shared_array)[shared_array->avail],
+					sizeof(void*)*batchcount);
+			shared_array->touched = 1;
+			goto alloc_done;
+		}
+	}
 	while (batchcount > 0) {
 		struct list_head *entry;
 		struct slab *slabp;
@@ -1686,6 +1710,7 @@ retry:
 must_grow:
 	l3->free_objects -= ac->avail;
+alloc_done:
 	spin_unlock(&cachep->spinlock);
 
 	if (unlikely(!ac->avail)) {
@@ -1784,13 +1809,11 @@ static inline void * __cache_alloc (kmem
  * the l3 structure
  */
 
-static inline void
-__free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
 {
 	int i;
 
-	check_irq_off();
-	spin_lock(&cachep->spinlock);
+	check_spinlock_acquired(cachep);
 
 	/* NUMA: move add into loop */
 	cachep->lists.free_objects += nr_objects;
@@ -1828,12 +1851,6 @@ __free_block(kmem_cache_t *cachep, void
 				&list3_data_ptr(cachep, objp)->slabs_partial);
 		}
 	}
-	spin_unlock(&cachep->spinlock);
-}
-
-static void free_block(kmem_cache_t* cachep, void** objpp, int len)
-{
-	__free_block(cachep, objpp, len);
 }
 
 static void cache_flusharray (kmem_cache_t* cachep, struct array_cache *ac)
@@ -1845,14 +1862,28 @@ static void cache_flusharray (kmem_cache
 	BUG_ON(!batchcount || batchcount > ac->avail);
 #endif
 	check_irq_off();
-	__free_block(cachep, &ac_entry(ac)[0], batchcount);
+	spin_lock(&cachep->spinlock);
+	if (cachep->lists.shared) {
+		struct array_cache *shared_array = cachep->lists.shared;
+		int max = shared_array->limit-shared_array->avail;
+		if (max) {
+			if (batchcount > max)
+				batchcount = max;
+			memcpy(&ac_entry(shared_array)[shared_array->avail],
+					&ac_entry(ac)[0],
+					sizeof(void*)*batchcount);
+			shared_array->avail += batchcount;
+			goto free_done;
+		}
+	}
+	free_block(cachep, &ac_entry(ac)[0], batchcount);
+free_done:
 #if STATS
 	{
 		int i = 0;
 		struct list_head *p;
 
-		spin_lock(&cachep->spinlock);
 		p = list3_data(cachep)->slabs_free.next;
 		while (p != &(list3_data(cachep)->slabs_free)) {
 			struct slab *slabp;
@@ -1864,9 +1895,9 @@ static void cache_flusharray (kmem_cache
 			p = p->next;
 		}
 		STATS_SET_FREEABLE(cachep, i);
-		spin_unlock(&cachep->spinlock);
 	}
 #endif
+	spin_unlock(&cachep->spinlock);
 	ac->avail -= batchcount;
 	memmove(&ac_entry(ac)[0], &ac_entry(ac)[batchcount],
 			sizeof(void*)*ac->avail);
@@ -2100,9 +2131,10 @@ static void do_ccupdate_local(void *info
 }
 
-static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
+static int do_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount, int shared)
 {
 	struct ccupdate_struct new;
+	struct array_cache *new_shared;
 	int i;
 
 	memset(&new.new,0,sizeof(new.new));
@@ -2136,11 +2168,29 @@ static int do_tune_cpucache (kmem_cache_
 		struct array_cache *ccold = new.new[i];
 		if (!ccold)
 			continue;
-		local_irq_disable();
+		spin_lock_irq(&cachep->spinlock);
 		free_block(cachep, ac_entry(ccold), ccold->avail);
-		local_irq_enable();
+		spin_unlock_irq(&cachep->spinlock);
 		kfree(ccold);
 	}
+	new_shared = kmalloc(sizeof(void*)*batchcount*shared+
+				sizeof(struct array_cache), GFP_KERNEL);
+	if (new_shared) {
+		struct array_cache *old;
+		new_shared->avail = 0;
+		new_shared->limit = batchcount*shared;
+		new_shared->batchcount = 0xbaadf00d;
+		new_shared->touched = 0;
+
+		spin_lock_irq(&cachep->spinlock);
+		old = cachep->lists.shared;
+		cachep->lists.shared = new_shared;
+		if (old)
+			free_block(cachep, ac_entry(old), old->avail);
+		spin_unlock_irq(&cachep->spinlock);
+		kfree(old);
+	}
+
 	return 0;
 }
 
@@ -2148,7 +2198,7 @@ static int do_tune_cpucache (kmem_cache_
 static void enable_cpucache (kmem_cache_t *cachep)
 {
 	int err;
-	int limit;
+	int limit, shared;
 
 	/* The head array serves three purposes:
 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
@@ -2163,11 +2213,25 @@ static void enable_cpucache (kmem_cache_
 	else if (cachep->objsize > PAGE_SIZE)
 		limit = 8;
 	else if (cachep->objsize > 1024)
-		limit = 54;
+		limit = 24;
 	else if (cachep->objsize > 256)
-		limit = 120;
+		limit = 54;
 	else
-		limit = 248;
+		limit = 120;
+
+	/* Cpu bound tasks (e.g. network routing) can exhibit cpu bound
+	 * allocation behaviour: Most allocs on one cpu, most free operations
+	 * on another cpu. For these cases, an efficient object passing between
+	 * cpus is necessary. This is provided by a shared array. The array
+	 * replaces Bonwick's magazine layer.
+	 * On uniprocessor, it's functionally equivalent (but less efficient)
+	 * to a larger limit. Thus disabled by default.
+	 */
+	shared = 0;
+#ifdef CONFIG_SMP
+	if (cachep->objsize <= PAGE_SIZE)
+		shared = 8;
+#endif
 
 #if DEBUG
 	/* With debugging enabled, large batchcount lead to excessively
@@ -2177,12 +2241,53 @@ static void enable_cpucache (kmem_cache_
 	if (limit > 32)
 		limit = 32;
 #endif
-	err = do_tune_cpucache(cachep, limit, (limit+1)/2);
+	err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
 	if (err)
 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
 					cachep->name, -err);
 }
 
+static void drain_array(kmem_cache_t *cachep, struct array_cache *ac)
+{
+	int tofree;
+
+	check_irq_off();
+	if (ac->touched) {
+		ac->touched = 0;
+	} else if (ac->avail) {
+		tofree = (ac->limit+4)/5;
+		if (tofree > ac->avail) {
+			tofree = (ac->avail+1)/2;
+		}
+		spin_lock(&cachep->spinlock);
+		free_block(cachep, ac_entry(ac), tofree);
+		spin_unlock(&cachep->spinlock);
+		ac->avail -= tofree;
+		memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
+					sizeof(void*)*ac->avail);
+	}
+}
+
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac)
+{
+	int tofree;
+
+	check_spinlock_acquired(cachep);
+	if (ac->touched) {
+		ac->touched = 0;
+	} else if (ac->avail) {
+		tofree = (ac->limit+4)/5;
+		if (tofree > ac->avail) {
+			tofree = (ac->avail+1)/2;
+		}
+		free_block(cachep, ac_entry(ac), tofree);
+		ac->avail -= tofree;
+		memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
+					sizeof(void*)*ac->avail);
+	}
+}
+
+
 /**
  * cache_reap - Reclaim memory from caches.
 *
@@ -2209,7 +2314,6 @@ static inline void cache_reap (void)
 		kmem_cache_t *searchp;
 		struct list_head* p;
 		int tofree;
-		struct array_cache *ac;
 		struct slab *slabp;
 
 		searchp = list_entry(walk, kmem_cache_t, next);
@@ -2219,19 +2323,8 @@ static inline void cache_reap (void)
 		check_irq_on();
 		local_irq_disable();
-		ac = ac_data(searchp);
-		if (ac->touched) {
-			ac->touched = 0;
-		} else if (ac->avail) {
-			tofree = (ac->limit+4)/5;
-			if (tofree > ac->avail) {
-				tofree = (ac->avail+1)/2;
-			}
-			free_block(searchp, ac_entry(ac), tofree);
-			ac->avail -= tofree;
-			memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
-					sizeof(void*)*ac->avail);
-		}
+		drain_array(searchp, ac_data(searchp));
+
 		if(time_after(searchp->lists.next_reap, jiffies))
 			goto next_irqon;
 
@@ -2240,6 +2333,10 @@ static inline void cache_reap (void)
 			goto next_unlock;
 		}
 		searchp->lists.next_reap = jiffies + REAPTIMEOUT_LIST3;
+
+		if (searchp->lists.shared)
+			drain_array_locked(searchp, searchp->lists.shared);
+
 		if (searchp->lists.free_touched) {
 			searchp->lists.free_touched = 0;
 			goto next_unlock;
 		}
@@ -2449,7 +2546,7 @@ struct seq_operations slabinfo_op = {
 
 #define MAX_SLABINFO_WRITE 128
 /**
- * slabinfo_write - SMP tuning for the slab allocator
+ * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data len
@@ -2459,7 +2556,7 @@ ssize_t slabinfo_write(struct file *file
 		size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
-	int limit, batchcount, res;
+	int limit, batchcount, shared, res;
 	struct list_head *p;
 
 	if (count > MAX_SLABINFO_WRITE)
@@ -2473,10 +2570,8 @@ ssize_t slabinfo_write(struct file *file
 		return -EINVAL;
 	*tmp = '\0';
 	tmp++;
-	limit = simple_strtol(tmp, &tmp, 10);
-	while (*tmp == ' ')
-		tmp++;
-	batchcount = simple_strtol(tmp, &tmp, 10);
+	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
+		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
 	down(&cache_chain_sem);
@@ -2487,10 +2582,11 @@ ssize_t slabinfo_write(struct file *file
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 ||
 			    batchcount < 1 ||
-			    batchcount > limit) {
+			    batchcount > limit ||
+			    shared < 0) {
 				res = -EINVAL;
 			} else {
-				res = do_tune_cpucache(cachep, limit, batchcount);
+				res = do_tune_cpucache(cachep, limit, batchcount, shared);
 			}
 			break;
 		}
_
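
Postscript for readers who would rather not trace the diff: the sketch below
models the two transfer paths in plain userspace C. The names
(toy_array_cache, toy_refill, toy_flush) and the fixed-size entry[] are
illustrative only, not the kernel's; locking, statistics and the fallback to
the slab lists are omitted. In the patch itself these paths live in
cache_alloc_refill() and cache_flusharray() and run under cachep->spinlock.

#include <string.h>

/* Simplified stand-in for struct array_cache: a LIFO stack of object
 * pointers plus bookkeeping.  One instance per cpu, one shared instance
 * per cache.  (The kernel kmalloc()s entry[] to "limit"; a fixed array
 * is used here only to keep the sketch self-contained.) */
struct toy_array_cache {
	unsigned int avail;	/* pointers currently stored */
	unsigned int limit;	/* capacity of entry[] */
	void *entry[128];	/* object pointers, top of stack at avail-1 */
};

/* Alloc path: the local per-cpu array is empty, so grab up to batchcount
 * pointers from the top of the shared array with a single memcpy. */
static unsigned int toy_refill(struct toy_array_cache *local,
			       struct toy_array_cache *shared,
			       unsigned int batchcount)
{
	if (batchcount > shared->avail)
		batchcount = shared->avail;
	if (!batchcount)
		return 0;	/* shared array empty: refill from the slab lists */
	shared->avail -= batchcount;
	memcpy(&local->entry[0], &shared->entry[shared->avail],
	       sizeof(void *) * batchcount);
	local->avail = batchcount;
	return batchcount;
}

/* Free path: the local per-cpu array is full, so push its oldest (coldest)
 * batchcount pointers into the shared array, then compact the local array
 * with memmove, as cache_flusharray() does. */
static unsigned int toy_flush(struct toy_array_cache *local,
			      struct toy_array_cache *shared,
			      unsigned int batchcount)
{
	unsigned int room = shared->limit - shared->avail;

	if (batchcount > room)
		batchcount = room;
	if (!batchcount)
		return 0;	/* shared array full: free to the slab lists */
	memcpy(&shared->entry[shared->avail], &local->entry[0],
	       sizeof(void *) * batchcount);
	shared->avail += batchcount;
	local->avail -= batchcount;
	memmove(&local->entry[0], &local->entry[batchcount],
		sizeof(void *) * local->avail);
	return batchcount;
}

Either path degenerates gracefully: when the shared array is empty or full,
the return value of 0 tells the caller to fall back to the ordinary slab
lists, exactly as the patched cache_alloc_refill() and cache_flusharray() do.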