author     Frederic Weisbecker <fweisbec@gmail.com>    2015-07-16 17:42:29 +0200
committer  Frederic Weisbecker <fweisbec@gmail.com>    2016-03-02 16:43:00 +0100
commit     555e0c1ef7ff49ee5ac3a1eb12de4a2e4722f63d (patch)
tree       ede085fe6a092b648768057d988f3f11ae864d1e /kernel/time/tick-sched.c
parent     e6e6cc22e067a6f44449aa8fd0328404079c3ba5 (diff)
download   linux-555e0c1ef7ff49ee5ac3a1eb12de4a2e4722f63d.tar.gz
perf: Migrate perf to use new tick dependency mask model
Instead of providing asynchronous checks for the nohz subsystem to verify
perf event tick dependency, migrate perf to the new mask.

Perf needs the tick for two situations:

1) Freq events. We could set the tick dependency when those are installed
on a CPU context. But setting a global dependency on top of the global
freq events accounting is much easier. If people want that to be
optimized, we can still refine that on the per-CPU tick dependency level.
This patch doesn't change the current behaviour anyway.

2) Throttled events: this is a per-CPU dependency.

Reviewed-by: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
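For context, a minimal sketch (not taken verbatim from this series) of how a
subsystem expresses these two kinds of dependency under the new mask model,
using the tick_dep_set()/tick_dep_clear() and
tick_dep_set_cpu()/tick_dep_clear_cpu() helpers from <linux/tick.h>. The
counters, lock and function names below are illustrative placeholders, not
the actual perf call sites:

/*
 * Illustrative sketch only: a subsystem expresses its tick dependency
 * through the mask helpers instead of an asynchronous *_can_stop_tick()
 * callback. Everything except the tick_dep_*() helpers and
 * TICK_DEP_BIT_PERF_EVENTS is a placeholder.
 */
#include <linux/tick.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(nr_freq_lock);	/* placeholder accounting lock */
static int nr_freq_events;		/* placeholder global freq-event count */

/* 1) Freq events: one global dependency on top of the global accounting. */
static void account_freq_event(void)
{
	spin_lock(&nr_freq_lock);
	if (!nr_freq_events++)
		tick_dep_set(TICK_DEP_BIT_PERF_EVENTS);
	spin_unlock(&nr_freq_lock);
}

static void unaccount_freq_event(void)
{
	spin_lock(&nr_freq_lock);
	if (!--nr_freq_events)
		tick_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
	spin_unlock(&nr_freq_lock);
}

/* 2) Throttled events: a per-CPU dependency, set while the CPU is throttled. */
static void perf_throttle_tick_dep(int cpu, bool throttled)
{
	if (throttled)
		tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
	else
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}

The global dependency for freq events simply piggybacks on the existing
global accounting; as the log above notes, refining it to per-CPU granularity
remains a possible later optimization.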
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--  kernel/time/tick-sched.c  8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 47e5ac45ce6952..3f79def37ca90e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/perf_event.h>
 #include <linux/context_tracking.h>
 
 #include <asm/irq_regs.h>
@@ -215,11 +214,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
 		return false;
 	}
 
-	if (!perf_event_can_stop_tick()) {
-		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return false;
-	}
-
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 	/*
 	 * sched_clock_tick() needs us?
@@ -257,7 +251,7 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
  * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
  * is NMI safe.
  */
-void tick_nohz_full_kick(void)
+static void tick_nohz_full_kick(void)
 {
 	if (!tick_nohz_full_cpu(smp_processor_id()))
 		return;
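With the mask model in place, can_stop_full_tick() no longer calls back into
perf at all: it only tests the accumulated TICK_DEP_MASK_* bits that
subsystems set through the helpers shown earlier. A simplified, illustrative
sketch of that consumer-side check follows; it approximates the sibling patch
that introduced the mask and is not part of this diff:

/*
 * Illustrative sketch only (not part of this diff): under the mask model
 * the tick-stop path tests accumulated TICK_DEP_MASK_* bits instead of
 * calling back into perf synchronously. trace_tick_stop() is the same
 * tracepoint already used in tick-sched.c (<trace/events/timer.h>).
 */
#include <linux/atomic.h>
#include <linux/tick.h>

static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return false;
	}

	/* ... the other TICK_DEP_MASK_* bits are tested the same way ... */

	return true;
}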