diff -urN 2.4.9/include/linux/cache.h cachelinealigned/include/linux/cache.h
--- 2.4.9/include/linux/cache.h	Wed Aug 15 22:43:55 2001
+++ cachelinealigned/include/linux/cache.h	Fri Aug 17 04:38:02 2001
@@ -1,6 +1,7 @@
 #ifndef __LINUX_CACHE_H
 #define __LINUX_CACHE_H
 
+#include <linux/config.h>
 #include <asm/cache.h>
 
 #ifndef L1_CACHE_ALIGN
@@ -13,6 +14,14 @@
 
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
 #endif
 
 #ifndef __cacheline_aligned
diff -urN 2.4.9/mm/filemap.c cachelinealigned/mm/filemap.c
--- 2.4.9/mm/filemap.c	Thu Aug 16 22:03:41 2001
+++ cachelinealigned/mm/filemap.c	Fri Aug 17 04:38:02 2001
@@ -45,12 +45,12 @@
 unsigned int page_hash_bits;
 struct page **page_hash_table;
 
-spinlock_t __cacheline_aligned pagecache_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t pagecache_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 /*
  * NOTE: to avoid deadlocking you must never acquire the pagecache_lock with
  * the pagemap_lru_lock held.
  */
-spinlock_t __cacheline_aligned pagemap_lru_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t pagemap_lru_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
 #define CLUSTER_PAGES		(1 << page_cluster)
 #define CLUSTER_OFFSET(x)	(((x) >> page_cluster) << page_cluster)
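
For context, a minimal usage sketch of the new annotation (the lock names below are hypothetical, not part of the patch): on CONFIG_SMP kernels the macro pads each object out to its own cache line, so two independently contended locks never share a line and CPUs spinning on one do not bounce the line holding the other; on UP kernels it expands to nothing, so no memory is wasted on alignment padding.

	/* Hypothetical example; foo_lock/bar_lock are not from the patch.
	 * Each lock starts on its own cache line on SMP builds, avoiding
	 * false sharing; on UP builds the annotation is empty. */
	#include <linux/cache.h>
	#include <linux/spinlock.h>

	static spinlock_t foo_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
	static spinlock_t bar_lock ____cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

This is exactly what the filemap.c hunk above does for pagecache_lock and pagemap_lru_lock, which previously paid the __cacheline_aligned padding cost even on uniprocessor builds.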