author    Andrew Morton <akpm@osdl.org>              2004-06-23 18:54:49 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-06-23 18:54:49 -0700
commit    b167eef8224b01dec3f15bb95936595013a6066f (patch)
tree      1ca24fa09e56c2891513f3fe246d7590acb1864f /mm
parent    c75b81a5da09be94099cf7b46bdb32fedc8ce71a (diff)
[PATCH] hwcache align kmalloc caches
From: Manfred Spraul <manfred@colorfullife.com>

Reversing the patches that made all caches hw cacheline aligned had an
unintended side effect on the kmalloc caches: before, they had the
SLAB_HWCACHE_ALIGN flag set; now it's clear.  This breaks one sgi driver -
it expects aligned caches.  Additionally, I think it's the right thing to
do: it costs virtually nothing (the caches are power-of-two sized) and
could reduce false sharing.

Additionally, the patch adds back the documentation for the
SLAB_HWCACHE_ALIGN flag.

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
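[Editorial note, not part of the changelog]  The "costs virtually nothing"
claim follows from how the slab allocator applies SLAB_HWCACHE_ALIGN: the
cacheline size is halved while the object still fits at least twice, so the
power-of-two kmalloc sizes never gain padding.  The standalone userspace
sketch below illustrates that rounding; the 64-byte line size and the helper
names are assumptions for the example, not kernel code.

/* Illustration only: mimic slab's SLAB_HWCACHE_ALIGN rounding and show
 * that power-of-two kmalloc sizes gain no padding.  The 64-byte cache
 * line is an assumed example value. */
#include <stdio.h>

#define EXAMPLE_CACHE_LINE 64

static size_t hwcache_aligned_size(size_t size, size_t line)
{
	/* Like slab: halve the alignment while two objects still fit,
	 * so small objects keep packing several per cacheline. */
	while (size <= line / 2)
		line /= 2;
	return (size + line - 1) & ~(line - 1);
}

int main(void)
{
	size_t sizes[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc-%4zu -> aligned object size %4zu\n",
		       sizes[i],
		       hwcache_aligned_size(sizes[i], EXAMPLE_CACHE_LINE));
	return 0;
}

For every power-of-two size the aligned object size equals the original
size, which is why turning the flag back on for the kmalloc caches is
essentially free.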
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  14
1 files changed, 10 insertions, 4 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 45c5c0549230d0..d99f1eb10dbf2f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -129,6 +129,10 @@
#define ARCH_KMALLOC_MINALIGN 0
#endif
+#ifndef ARCH_KMALLOC_FLAGS
+#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
+#endif
+
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
@@ -758,7 +762,7 @@ void __init kmem_cache_init(void)
* allow tighter packing of the smaller caches. */
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- SLAB_PANIC, NULL, NULL);
+ (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
@@ -768,7 +772,8 @@ void __init kmem_cache_init(void)
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (SLAB_CACHE_DMA | SLAB_PANIC), NULL, NULL);
+ (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
+ NULL, NULL);
sizes++;
names++;
@@ -1116,8 +1121,9 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
* %SLAB_NO_REAP - Don't automatically reap this cache when we're under
* memory pressure.
*
- * %SLAB_HWCACHE_ALIGN - This flag has no effect and will be removed soon.
- *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
*/
kmem_cache_t *
kmem_cache_create (const char *name, size_t size, size_t align,
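[Editorial note, not part of the patch]  For context on the driver-side
expectation mentioned in the changelog, here is a minimal sketch of a cache
created with SLAB_HWCACHE_ALIGN using the six-argument kmem_cache_create()
of this era, matching the calls visible in the hunks above.  The "foo"
names and the structure are invented for illustration; this is not taken
from the sgi driver.

/* Hypothetical driver cache whose objects must start on a hardware
 * cacheline boundary; all names here are made up for the example. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct foo_obj {
	unsigned long state;
	unsigned char payload[56];
};

static kmem_cache_t *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo_obj),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

With this patch applied, plain kmalloc() allocations get the same cacheline
alignment via ARCH_KMALLOC_FLAGS without the caller asking for it.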