path: root/kernel/time/sched_clock.c
author    Peter Zijlstra <peterz@infradead.org>  2023-05-19 12:21:00 +0200
committer Peter Zijlstra <peterz@infradead.org>  2023-06-05 21:11:04 +0200
commit    5949a68c73444d89b171703b67ff04fc4d6059c1 (patch)
tree      35e76f4b405de1b2c47aa7b805f64bbf123a1268 /kernel/time/sched_clock.c
parent    d16317de9b412aa7bd3598c607112298e36b4352 (diff)
download  linux-5949a68c73444d89b171703b67ff04fc4d6059c1.tar.gz
time/sched_clock: Provide sched_clock_noinstr()
With the intent to provide local_clock_noinstr(), a variant of local_clock() that is safe to call from noinstr code (on the assumption that any such code is already non-preemptible), prepare by providing a noinstr sched_clock_noinstr() function.

Specifically, preempt_enable_*() calls out to schedule(), which upsets noinstr validation efforts. As such, pull the preempt_{dis,en}able_notrace() requirement out of the individual sched_clock_read() implementations and provide it explicitly in the sched_clock() function instead.

This further requires said sched_clock_read() functions to be noinstr themselves for ARCH_WANTS_NO_INSTR users; see the next few patches.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>  # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.302350330@infradead.org
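[Editor's note] As a rough illustration of the split this patch makes, here is a minimal user-space C sketch. It is illustrative only: every name in it is made up, and the two empty helpers merely stand in for preempt_disable_notrace()/preempt_enable_notrace(). The raw reader does no preemption handling of its own; a thin wrapper supplies it once, mirroring the sched_clock_noinstr()/sched_clock() pair in the diff below.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Stand-in for the raw, instrumentation-free read.  In the kernel this
     * is sched_clock_noinstr(): callable only when the caller is already
     * non-preemptible, and free of anything instrumentation can reach.
     */
    static uint64_t clock_read_raw(void)
    {
            return 42;      /* placeholder for the seqcount-latch read loop */
    }

    /* Stand-ins for preempt_disable_notrace()/preempt_enable_notrace(). */
    static void nonpreemptible_enter(void) { }
    static void nonpreemptible_leave(void) { }

    /*
     * Stand-in for sched_clock(): the preemption handling that used to live
     * inside the individual sched_clock_read() implementations is supplied
     * here, once, around the raw read.
     */
    static uint64_t clock_read(void)
    {
            uint64_t ns;

            nonpreemptible_enter();
            ns = clock_read_raw();
            nonpreemptible_leave();
            return ns;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)clock_read());
            return 0;
    }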
Diffstat (limited to 'kernel/time/sched_clock.c')
-rw-r--r--  kernel/time/sched_clock.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index e8f2fb09a214d9..68d6c1190ac7f8 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -64,7 +64,7 @@ static struct clock_data cd ____cacheline_aligned = {
 	.actual_read_sched_clock = jiffy_sched_clock_read,
 };
 
-static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
@@ -80,23 +80,33 @@ notrace int sched_clock_read_retry(unsigned int seq)
 	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
-unsigned long long notrace sched_clock(void)
+unsigned long long noinstr sched_clock_noinstr(void)
 {
-	u64 cyc, res;
-	unsigned int seq;
 	struct clock_read_data *rd;
+	unsigned int seq;
+	u64 cyc, res;
 
 	do {
-		rd = sched_clock_read_begin(&seq);
+		seq = raw_read_seqcount_latch(&cd.seq);
+		rd = cd.read_data + (seq & 1);
 
 		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 		      rd->sched_clock_mask;
 		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
-	} while (sched_clock_read_retry(seq));
+	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));
 
 	return res;
 }
 
+unsigned long long notrace sched_clock(void)
+{
+	unsigned long long ns;
+	preempt_disable_notrace();
+	ns = sched_clock_noinstr();
+	preempt_enable_notrace();
+	return ns;
+}
+
 /*
  * Updating the data required to read the clock.
  *
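[Editor's note] For readers unfamiliar with the seqcount-latch pattern that sched_clock_noinstr() now open-codes via raw_read_seqcount_latch()/raw_read_seqcount_latch_retry(): the writer keeps two copies of the data and flips a sequence counter, so a reader either sees a stable copy or notices the counter change and retries. Below is a rough user-space analogue using C11 atomics. It is a sketch, not the kernel implementation: the kernel's seqcount_latch_t carries its own barriers and lockdep hooks, and a strictly conforming version would also need atomic accesses for the data copies themselves.

    #include <stdatomic.h>
    #include <stdint.h>

    struct read_data {
            uint64_t epoch_ns, epoch_cyc, mask;
            uint32_t mult, shift;
    };

    static struct read_data data[2];   /* two copies, like cd.read_data[] */
    static atomic_uint seq;            /* analogue of cd.seq */

    /* Writer: update the copy readers are not using, then flip seq (twice). */
    static void latch_write(const struct read_data *new_rd)
    {
            unsigned int s = atomic_load_explicit(&seq, memory_order_relaxed);

            data[(s + 1) & 1] = *new_rd;    /* update the idle copy */
            atomic_store_explicit(&seq, s + 1, memory_order_release);
            data[s & 1] = *new_rd;          /* now update the other one */
            atomic_store_explicit(&seq, s + 2, memory_order_release);
    }

    /* Reader: the shape of the loop in sched_clock_noinstr() above. */
    static uint64_t latch_read(uint64_t cyc_now)
    {
            struct read_data *rd;
            unsigned int s;
            uint64_t cyc, res;

            do {
                    s = atomic_load_explicit(&seq, memory_order_acquire);
                    rd = &data[s & 1];      /* low bit picks the stable copy */
                    cyc = (cyc_now - rd->epoch_cyc) & rd->mask;
                    res = rd->epoch_ns + ((cyc * rd->mult) >> rd->shift);
                    atomic_thread_fence(memory_order_acquire);
            } while (atomic_load_explicit(&seq, memory_order_relaxed) != s);

            return res;
    }

    int main(void)
    {
            struct read_data rd = { .epoch_ns = 1000, .mask = ~0ULL, .mult = 1, .shift = 0 };

            latch_write(&rd);
            return latch_read(1234) == 2234 ? 0 : 1;
    }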