author     Ingo Molnar <mingo@elte.hu>             2006-01-12 01:05:27 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-12 09:08:49 -0800
commit     4dc7a0bbeb6882ad665e588e82fabe5bb4645f2f (patch)
tree       8c034f802157d7f449e76f45086c0e13e0ea4711
parent     c6b44d10f25e5a93eca5135b686a35775c63546e (diff)
[PATCH] sched: add cacheflush() asm
Add per-arch sched_cacheflush() which is a write-back cacheflush used by
the migration-cost calibration code at bootup time.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
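[Editor's note: the following is a minimal, hypothetical userspace sketch of the pattern the bootup
calibration relies on: warm a working set, flush the caches, then time another pass. It is not the
kernel's actual migration-cost calibration code; the no-op sched_cacheflush() stand-in, the
WORKSET_SIZE constant and the now_ns() helper are illustrative assumptions. In the kernel, the real
per-arch primitive added by the hunks below is what gets called between measurements.]

/*
 * Illustrative sketch only -- the real sched_cacheflush() and the
 * migration-cost calibration are kernel-internal.  A no-op stand-in
 * shows the calling pattern: warm a working set, flush, then time a
 * second pass; with a real flush that pass would run cache-cold,
 * approximating what a task pays when it migrates to a CPU whose
 * cache it has never touched.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define WORKSET_SIZE	(4 * 1024 * 1024)	/* hypothetical working-set size */

/* Stand-in for the per-arch primitive added by this patch. */
static void sched_cacheflush(void)
{
	/* e.g. wbinvd() on i386/x86_64, ia64_sal_cache_flush(3) on ia64 */
}

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	unsigned char *mem = malloc(WORKSET_SIZE);
	volatile unsigned char *buf = mem;
	long long t0, t1;
	size_t i;

	if (!mem)
		return 1;

	/* Warm the cache with the working set. */
	for (i = 0; i < WORKSET_SIZE; i++)
		buf[i] = (unsigned char)i;

	/* Flush, as the calibration loop would between measurements. */
	sched_cacheflush();

	/* Time a second pass; with a real flush it would be cache-cold. */
	t0 = now_ns();
	for (i = 0; i < WORKSET_SIZE; i++)
		buf[i]++;
	t1 = now_ns();

	printf("second pass over %d bytes: %lld ns\n", WORKSET_SIZE, t1 - t0);
	free(mem);
	return 0;
}

Note that in this patch only ia64, i386 and x86_64 get a real write-back flush; the other
architectures receive an empty TODO stub, so their flush is a no-op for now.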
-rw-r--r--  arch/ia64/kernel/setup.c       10
-rw-r--r--  include/asm-alpha/system.h     10
-rw-r--r--  include/asm-arm/system.h       10
-rw-r--r--  include/asm-arm26/system.h     10
-rw-r--r--  include/asm-i386/system.h       9
-rw-r--r--  include/asm-ia64/system.h       1
-rw-r--r--  include/asm-m32r/system.h      10
-rw-r--r--  include/asm-mips/system.h      10
-rw-r--r--  include/asm-parisc/system.h     9
-rw-r--r--  include/asm-powerpc/system.h   10
-rw-r--r--  include/asm-ppc/system.h       10
-rw-r--r--  include/asm-s390/system.h      10
-rw-r--r--  include/asm-sh/system.h        10
-rw-r--r--  include/asm-sparc/system.h     10
-rw-r--r--  include/asm-sparc64/system.h   10
-rw-r--r--  include/asm-x86_64/system.h     9

16 files changed, 148 insertions, 0 deletions
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c33305d8e5eb21..d91c8ff2c0d7d3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,6 +60,7 @@
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
+#include <asm/system.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
@@ -870,6 +871,15 @@ cpu_init (void)
pm_idle = default_idle;
}
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+ ia64_sal_cache_flush(3);
+}
+
void
check_bugs (void)
{
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d12891ef..766ab868e8ab1a 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -140,6 +140,16 @@ extern void halt(void) __attribute__((noreturn));
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc0795..0497171df8c9cb 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -172,6 +172,16 @@ do { \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f31d..1bce6b3590ff37 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -115,6 +115,16 @@ do { \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94e92..36a92ed6a9d0d7 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
extern unsigned long arch_align_stack(unsigned long sp);
#endif
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e3260..510c31c50723b9 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void);
+void sched_cacheflush(void);
#define arch_align_stack(x) (x)
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index dcf619a0a0b032..06c12a037cba55 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
last = __last; \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af38d..5f761ad5a8d9e2 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -164,6 +164,16 @@ do { \
__restore_dsp(current); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80cb01..a5a973c0c07f55 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
(last) = _switch_to(prev, next); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
/* interrupt control */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 0c58e32a957052..1be629b4fb97bf 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -175,6 +175,16 @@ struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index bd99cb53a19fb5..212dca66fcac28 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -123,6 +123,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd663..c7c3a9ad593f49 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd7c1..bb0330499bdfef 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
last = __last; \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b637..52fe2e464e15ee 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
} while(0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Changing the IRQ level on the Sparc.
*/
extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6fa11..07d72367f82c9a 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 38c1e8a69c9c03..0eacbefb7dd04a 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")