aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorManfred Spraul <manfred@colorfullife.com>2005-01-03 04:16:46 -0800
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-01-03 04:16:46 -0800
commitd32d6f8ab2ef776261359b78127fd0251a041a90 (patch)
tree8b5ccb26ef3ac734976060ff89dd1dddfa3024f1 /mm
parenta161d268c652c927de0c553192e034c537ac59d7 (diff)
downloadhistory-d32d6f8ab2ef776261359b78127fd0251a041a90.tar.gz
[PATCH] slab: Add more arch overrides to control object alignment
Add ARCH_SLAB_MINALIGN and document ARCH_KMALLOC_MINALIGN: The flags allow the arch code to override the default minimum object alignment (BYTES_PER_WORD). Signed-Off-By: Manfred Spraul <manfred@colorfullife.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.c82
1 file changed, 56 insertions, 26 deletions
diff --git a/mm/slab.c b/mm/slab.c
index bec2eb6ccb7249..12bf28e2b9cfc3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -128,9 +128,28 @@
#endif
#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
+ * Note that this flag disables some debug features.
+ */
#define ARCH_KMALLOC_MINALIGN 0
#endif
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
@@ -1172,7 +1191,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
void (*dtor)(void*, kmem_cache_t *, unsigned long))
{
- size_t left_over, slab_size;
+ size_t left_over, slab_size, ralign;
kmem_cache_t *cachep = NULL;
/*
@@ -1222,24 +1241,44 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & ~CREATE_MASK)
BUG();
- if (align) {
- /* combinations of forced alignment and advanced debugging is
- * not yet implemented.
+ /* Check that size is in terms of words. This is needed to avoid
+ * unaligned accesses for some archs when redzoning is used, and makes
+ * sure any on-slab bufctl's are also correctly aligned.
+ */
+ if (size & (BYTES_PER_WORD-1)) {
+ size += (BYTES_PER_WORD-1);
+ size &= ~(BYTES_PER_WORD-1);
+ }
+
+ /* calculate out the final buffer alignment: */
+ /* 1) arch recommendation: can be overridden for debug */
+ if (flags & SLAB_HWCACHE_ALIGN) {
+ /* Default alignment: as specified by the arch code.
+ * Except if an object is really small, then squeeze multiple
+ * objects into one cacheline.
*/
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ ralign = cache_line_size();
+ while (size <= ralign/2)
+ ralign /= 2;
} else {
- if (flags & SLAB_HWCACHE_ALIGN) {
- /* Default alignment: as specified by the arch code.
- * Except if an object is really small, then squeeze multiple
- * into one cacheline.
- */
- align = cache_line_size();
- while (size <= align/2)
- align /= 2;
- } else {
- align = BYTES_PER_WORD;
- }
- }
+ ralign = BYTES_PER_WORD;
+ }
+ /* 2) arch mandated alignment: disables debug if necessary */
+ if (ralign < ARCH_SLAB_MINALIGN) {
+ ralign = ARCH_SLAB_MINALIGN;
+ if (ralign > BYTES_PER_WORD)
+ flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ }
+ /* 3) caller mandated alignment: disables debug if necessary */
+ if (ralign < align) {
+ ralign = align;
+ if (ralign > BYTES_PER_WORD)
+ flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ }
+ /* 4) Store it. Note that the debug code below can reduce
+ * the alignment to BYTES_PER_WORD.
+ */
+ align = ralign;
/* Get cache's description obj. */
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
@@ -1247,15 +1286,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
goto opps;
memset(cachep, 0, sizeof(kmem_cache_t));
- /* Check that size is in terms of words. This is needed to avoid
- * unaligned accesses for some archs when redzoning is used, and makes
- * sure any on-slab bufctl's are also correctly aligned.
- */
- if (size & (BYTES_PER_WORD-1)) {
- size += (BYTES_PER_WORD-1);
- size &= ~(BYTES_PER_WORD-1);
- }
-
#if DEBUG
cachep->reallen = size;