--- 2.4.10pre8aa1/mm/slab.c.~1~	Wed Sep 12 03:23:43 2001
+++ 2.4.10pre8aa1/mm/slab.c	Wed Sep 12 05:02:40 2001
@@ -926,8 +926,10 @@
 			break;
 
 		slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
+#if DEBUG
 		if (slabp->inuse)
 			BUG();
+#endif
 		list_del(&slabp->list);
 
 		spin_unlock_irq(&cachep->spinlock);
@@ -1215,7 +1217,7 @@
 }
 
 static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
-						slab_t *slabp, int partial)
+						slab_t *slabp)
 {
 	void *objp;
 
@@ -1228,14 +1230,9 @@
 	objp = slabp->s_mem + slabp->free*cachep->objsize;
 	slabp->free=slab_bufctl(slabp)[slabp->free];
 
-	if (slabp->free == BUFCTL_END) {
+	if (__builtin_expect(slabp->free == BUFCTL_END, 0)) {
 		list_del(&slabp->list);
 		list_add(&slabp->list, &cachep->slabs_full);
-	} else {
-		if (!partial) {
-			list_del(&slabp->list);
-			list_add(&slabp->list, &cachep->slabs_partial);
-		}
 	}
 #if DEBUG
 	if (cachep->flags & SLAB_POISON)
@@ -1262,20 +1259,23 @@
  */
 #define kmem_cache_alloc_one(cachep)				\
 ({								\
-	slab_t *slabp;						\
-	struct list_head * slab_freelist;			\
-	int partial = 1;					\
+	struct list_head * slabs_partial, * entry;		\
+	slab_t *slabp;						\
 								\
-	slab_freelist = &(cachep)->slabs_partial;		\
-	if (list_empty(slab_freelist)) {			\
-		partial = 0;					\
-		slab_freelist = &(cachep)->slabs_free;		\
-		if (list_empty(slab_freelist))			\
+	slabs_partial = &(cachep)->slabs_partial;		\
+	entry = slabs_partial->next;				\
+	if (__builtin_expect(entry == slabs_partial, 0)) {	\
+		struct list_head * slabs_free;			\
+		slabs_free = &(cachep)->slabs_free;		\
+		entry = slabs_free->next;			\
+		if (__builtin_expect(entry == slabs_free, 0))	\
 			goto alloc_new_slab;			\
+		list_del(entry);				\
+		list_add(entry, slabs_partial);			\
 	}							\
 								\
-	slabp = list_entry(slab_freelist->next, slab_t, list);	\
-	kmem_cache_alloc_one_tail(cachep, slabp, partial);	\
+	slabp = list_entry(entry, slab_t, list);		\
+	kmem_cache_alloc_one_tail(cachep, slabp);		\
 })
 
 #ifdef CONFIG_SMP
@@ -1283,25 +1283,27 @@
 {
 	int batchcount = cachep->batchcount;
 	cpucache_t* cc = cc_data(cachep);
-	struct list_head * slab_freelist;
-	int partial;
-	slab_t *slabp;
 
 	spin_lock(&cachep->spinlock);
 	while (batchcount--) {
+		struct list_head * slabs_partial, * entry;
+		slab_t *slabp;
 		/* Get slab alloc is to come from. */
-		slab_freelist = &(cachep)->slabs_partial;
-		partial = 1;
-		if (list_empty(slab_freelist)) {
-			partial = 0;
-			slab_freelist = &(cachep)->slabs_free;
-			if (list_empty(slab_freelist))
+		slabs_partial = &(cachep)->slabs_partial;
+		entry = slabs_partial->next;
+		if (__builtin_expect(entry == slabs_partial, 0)) {
+			struct list_head * slabs_free;
+			slabs_free = &(cachep)->slabs_free;
+			entry = slabs_free->next;
+			if (__builtin_expect(entry == slabs_free, 0))
 				break;
+			list_del(entry);
+			list_add(entry, slabs_partial);
 		}
 
-		slabp = list_entry(slab_freelist->next, slab_t, list);
+		slabp = list_entry(entry, slab_t, list);
 		cc_entry(cc)[cc->avail++] =
-				kmem_cache_alloc_one_tail(cachep, slabp, partial);
+				kmem_cache_alloc_one_tail(cachep, slabp);
 	}
 	spin_unlock(&cachep->spinlock);
 
@@ -1432,23 +1434,18 @@
 	STATS_DEC_ACTIVE(cachep);
 
 	/* fixup slab chains */
-	if (!--slabp->inuse)
-		goto moveslab_free;
-	if (slabp->inuse + 1 == cachep->num)
-		goto moveslab_partial;
-	return;
-
-moveslab_partial:
-	/* Was full. */
-	list_del(&slabp->list);
-	list_add(&slabp->list, &cachep->slabs_partial);
-	return;
-
-moveslab_free:
-	/* Was partial, now empty. */
-	list_del(&slabp->list);
-	list_add(&slabp->list, &cachep->slabs_free);
-	return;
+	{
+		int inuse = slabp->inuse;
+		if (__builtin_expect(!--slabp->inuse, 0)) {
+			/* Was partial or full, now empty. */
+			list_del(&slabp->list);
+			list_add(&slabp->list, &cachep->slabs_free);
+		} else if (__builtin_expect(inuse == cachep->num, 0)) {
+			/* Was full. */
+			list_del(&slabp->list);
+			list_add(&slabp->list, &cachep->slabs_partial);
+		}
+	}
 }
 
 #ifdef CONFIG_SMP
@@ -1756,8 +1753,10 @@
 		p = searchp->slabs_free.next;
 		while (p != &searchp->slabs_free) {
 			slabp = list_entry(p, slab_t, list);
+#if DEBUG
 			if (slabp->inuse)
 				BUG();
+#endif
 			full_free++;
 			p = p->next;
 		}
@@ -1807,8 +1806,10 @@
 		if (p == &best_cachep->slabs_free)
 			break;
 		slabp = list_entry(p,slab_t,list);
+#if DEBUG
 		if (slabp->inuse)
 			BUG();
+#endif
 		list_del(&slabp->list);
 		STATS_INC_REAPED(best_cachep);
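
For reference, here is a minimal standalone sketch of the two list-handling
patterns the patch introduces: the allocation fast path that only touches the
free list when the partial list is empty, and the free-side fixup that moves a
slab between chains only when its state actually changes. The toy list helpers
and names (toy_cache, toy_slab, pick_slab, fixup_slab_chains) are illustrative
stand-ins, not the kernel's <linux/list.h> or slab_t definitions; only the
branch structure and the __builtin_expect() hints mirror the patch.

#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

struct toy_slab {
	struct list_head list;
	int inuse;		/* objects allocated from this slab */
};

struct toy_cache {
	struct list_head slabs_full, slabs_partial, slabs_free;
	int num;		/* objects per slab */
};

/*
 * Allocation side: try the partial list first; only when it is empty
 * pull a slab off the free list and migrate it to the partial list.
 * Returns NULL when the cache must grow a new slab.
 */
static struct list_head *pick_slab(struct toy_cache *cachep)
{
	struct list_head *slabs_partial = &cachep->slabs_partial;
	struct list_head *entry = slabs_partial->next;

	if (unlikely(entry == slabs_partial)) {
		struct list_head *slabs_free = &cachep->slabs_free;

		entry = slabs_free->next;
		if (unlikely(entry == slabs_free))
			return NULL;
		list_del(entry);
		list_add(entry, slabs_partial);	/* free -> partial */
	}
	return entry;
}

/*
 * Free side: after dropping one object, move the slab only on a state
 * change - to slabs_free when it became empty, or from full to partial
 * when it was previously full.
 */
static void fixup_slab_chains(struct toy_cache *cachep, struct toy_slab *slabp)
{
	int inuse = slabp->inuse;

	if (unlikely(!--slabp->inuse)) {
		/* was partial or full, now empty */
		list_del(&slabp->list);
		list_add(&slabp->list, &cachep->slabs_free);
	} else if (unlikely(inuse == cachep->num)) {
		/* was full */
		list_del(&slabp->list);
		list_add(&slabp->list, &cachep->slabs_partial);
	}
}

int main(void)
{
	struct toy_cache cache = { .num = 2 };
	struct toy_slab slab = { .inuse = 0 };

	INIT_LIST_HEAD(&cache.slabs_full);
	INIT_LIST_HEAD(&cache.slabs_partial);
	INIT_LIST_HEAD(&cache.slabs_free);
	list_add(&slab.list, &cache.slabs_free);

	if (pick_slab(&cache) != &slab.list)	/* free -> partial */
		return 1;
	slab.inuse = 1;
	fixup_slab_chains(&cache, &slab);	/* partial -> free */
	printf("slab back on free list: %d\n",
	       cache.slabs_free.next == &slab.list);
	return 0;
}

With __builtin_expect(..., 0) on the empty-list and state-change tests, gcc
lays out the common case (allocate from a non-empty partial list, free without
migrating the slab) as the straight-line fall-through path, which is the point
of dropping the old per-call "partial" flag and the moveslab_* goto labels.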