author    Peter Zijlstra <peterz@infradead.org>  2021-01-25 16:26:50 +0100
committer Ingo Molnar <mingo@kernel.org>  2021-02-17 14:12:42 +0100
commit    ef72661e28c64ad610f89acc2832ec67b27ba438 (patch)
tree      a68805ca80c0cc424cda83ae3100fd016f7a8793
parent    73f44fe19d359635a607e8e8daa0da4001c1cfc2 (diff)
download  bluetooth-next-ef72661e28c64ad610f89acc2832ec67b27ba438.tar.gz
sched: Harden PREEMPT_DYNAMIC
Use the new EXPORT_STATIC_CALL_TRAMP() / static_call_mod() to unexport
the static_call_key for the PREEMPT_DYNAMIC calls such that modules can
no longer update these calls.

Having modules change/hi-jack the preemption calls would be horrible.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
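For reference, a simplified sketch of what the two export macros boil down
to (condensed from include/linux/static_call.h; the in-tree definitions also
carry _GPL variants and differ between the inline and out-of-line static-call
implementations, so treat this as illustrative rather than exact):

    /*
     * EXPORT_STATIC_CALL() exposes both the key and the trampoline, so a
     * module can call the site *and* re-patch it via static_call_update():
     */
    #define EXPORT_STATIC_CALL(name)                                    \
            EXPORT_SYMBOL(STATIC_CALL_KEY(name));                       \
            EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))

    /*
     * EXPORT_STATIC_CALL_TRAMP() exposes only the trampoline: modules can
     * still call through it (using static_call_mod() instead of
     * static_call()), but have no handle on the key and therefore cannot
     * redirect the preemption calls.
     */
    #define EXPORT_STATIC_CALL_TRAMP(name)                              \
            EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))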
-rw-r--r--  arch/x86/include/asm/preempt.h  4
-rw-r--r--  include/linux/kernel.h          2
-rw-r--r--  include/linux/sched.h           2
-rw-r--r--  kernel/sched/core.c             8
4 files changed, 8 insertions, 8 deletions
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 9b12dce9bda55..0aa96f824af16 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -114,7 +114,7 @@ DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
#define __preempt_schedule() \
do { \
- __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule)); \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)
@@ -127,7 +127,7 @@ DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
#define __preempt_schedule_notrace() \
do { \
- __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule_notrace)); \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cfd3d349f905e..5b7ed6dc99acc 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -93,7 +93,7 @@ DECLARE_STATIC_CALL(might_resched, __cond_resched);
static __always_inline void might_resched(void)
{
- static_call(might_resched)();
+ static_call_mod(might_resched)();
}
#else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2f35594b8b535..4d568288abf9f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1880,7 +1880,7 @@ DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
{
- return static_call(cond_resched)();
+ return static_call_mod(cond_resched)();
}
#else
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4a17bb5f28b0f..cec507be460c3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5267,7 +5267,7 @@ EXPORT_SYMBOL(preempt_schedule);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
-EXPORT_STATIC_CALL(preempt_schedule);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
#endif
@@ -5325,7 +5325,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-EXPORT_STATIC_CALL(preempt_schedule_notrace);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#endif
#endif /* CONFIG_PREEMPTION */
@@ -6997,10 +6997,10 @@ EXPORT_SYMBOL(__cond_resched);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-EXPORT_STATIC_CALL(cond_resched);
+EXPORT_STATIC_CALL_TRAMP(cond_resched);
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-EXPORT_STATIC_CALL(might_resched);
+EXPORT_STATIC_CALL_TRAMP(might_resched);
#endif
/*