author     Paul Gortmaker <paul.gortmaker@windriver.com>  2011-01-14 10:48:26 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>  2011-01-14 10:48:47 -0500
commit     377ce86729f7a45d42cdb5d08ce3e1612247b780 (patch)
tree       5667cee4d5d1c869baed5af13d8a0a77d53fd5bf /trace-Convert-various-locks-to-raw_spinlock.patch
parent     bc83d4f8da427bb7bf26dad9895aabe59fd38da4 (diff)
download   rt-patches-377ce86729f7a45d42cdb5d08ce3e1612247b780.tar.gz
rename atomic patches to raw

Also fix up the subjects, with:

    for i in `cat /tmp/list` ; do mv $i $i~ ; cat $i~ | sed 's/\(^Subject: .*\)atomic\(.*$\)/\1raw\2/' > $i ; done

Best viewed with "git show -M".

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
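For illustration only (an editor's note, not part of the commit message): the sed expression rewrites "atomic" to "raw" in each patch's Subject line. The input below is hypothetical, inferred from this patch's post-rename subject:

    $ echo 'Subject: [PATCH] trace: Convert various locks to atomic_spinlock' | \
          sed 's/\(^Subject: .*\)atomic\(.*$\)/\1raw\2/'
    Subject: [PATCH] trace: Convert various locks to raw_spinlock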
Diffstat (limited to 'trace-Convert-various-locks-to-raw_spinlock.patch')
-rw-r--r--  trace-Convert-various-locks-to-raw_spinlock.patch  |  262
1 file changed, 262 insertions, 0 deletions
diff --git a/trace-Convert-various-locks-to-raw_spinlock.patch b/trace-Convert-various-locks-to-raw_spinlock.patch
new file mode 100644
index 0000000..8a4e443
--- /dev/null
+++ b/trace-Convert-various-locks-to-raw_spinlock.patch
@@ -0,0 +1,262 @@
+From 87654a70523a8c5baadcbbc07d80cbae8f912837 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 25 Jul 2009 17:13:33 +0200
+Subject: [PATCH] trace: Convert various locks to raw_spinlock
+
+commit 87654a70523a8c5baadcbbc07d80cbae8f912837 in tip.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index bf27bb7..c94dd14 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -403,7 +403,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
+ struct ring_buffer_per_cpu {
+ int cpu;
+ struct ring_buffer *buffer;
+- spinlock_t reader_lock; /* serialize readers */
++ raw_spinlock_t reader_lock; /* serialize readers */
+ raw_spinlock_t lock;
+ struct lock_class_key lock_key;
+ struct list_head pages;
+@@ -570,7 +570,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+
+ cpu_buffer->cpu = cpu;
+ cpu_buffer->buffer = buffer;
+- spin_lock_init(&cpu_buffer->reader_lock);
++ raw_spin_lock_init(&cpu_buffer->reader_lock);
+ lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
+ cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&cpu_buffer->pages);
+@@ -2117,9 +2117,9 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+
+ cpu_buffer = iter->cpu_buffer;
+
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rb_iter_reset(iter);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+
+@@ -2517,10 +2517,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ again:
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ event = rb_buffer_peek(buffer, cpu, ts);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
+
+ if (event && event->type_len == RINGBUF_TYPE_PADDING) {
+@@ -2547,9 +2547,9 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ unsigned long flags;
+
+ again:
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_iter_peek(iter, ts);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ if (event && event->type_len == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+@@ -2587,7 +2587,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+
+ event = rb_buffer_peek(buffer, cpu, ts);
+ if (!event)
+@@ -2597,7 +2597,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+
+ out_unlock:
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
+
+ out:
+@@ -2645,11 +2645,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
+ atomic_inc(&cpu_buffer->record_disabled);
+ synchronize_sched();
+
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ __raw_spin_lock(&cpu_buffer->lock);
+ rb_iter_reset(iter);
+ __raw_spin_unlock(&cpu_buffer->lock);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return iter;
+ }
+@@ -2687,14 +2687,14 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+ unsigned long flags;
+
+ again:
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_iter_peek(iter, ts);
+ if (!event)
+ goto out;
+
+ rb_advance_iter(iter);
+ out:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ if (event && event->type_len == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+@@ -2762,7 +2762,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+
+ atomic_inc(&cpu_buffer->record_disabled);
+
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ __raw_spin_lock(&cpu_buffer->lock);
+
+@@ -2770,7 +2770,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+
+ __raw_spin_unlock(&cpu_buffer->lock);
+
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ }
+@@ -2808,10 +2808,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
+
+ if (!ret)
+@@ -2842,10 +2842,10 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
+
+ return ret;
+@@ -3031,7 +3031,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ if (!bpage)
+ goto out;
+
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
+@@ -3106,7 +3106,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+ ret = read;
+
+ out_unlock:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ out:
+ return ret;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8bc8d8a..8ab991c 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -668,7 +668,7 @@ static void trace_init_cmdlines(void)
+ }
+
+ static int trace_stop_count;
+-static DEFINE_SPINLOCK(tracing_start_lock);
++static DEFINE_RAW_SPINLOCK(tracing_start_lock);
+
+ /**
+ * ftrace_off_permanent - disable all ftrace code permanently
+@@ -699,7 +699,7 @@ void tracing_start(void)
+ if (tracing_disabled)
+ return;
+
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (--trace_stop_count) {
+ if (trace_stop_count < 0) {
+ /* Someone screwed up their debugging */
+@@ -720,7 +720,7 @@ void tracing_start(void)
+
+ ftrace_start();
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ }
+
+ /**
+@@ -735,7 +735,7 @@ void tracing_stop(void)
+ unsigned long flags;
+
+ ftrace_stop();
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (trace_stop_count++)
+ goto out;
+
+@@ -748,7 +748,7 @@ void tracing_stop(void)
+ ring_buffer_record_disable(buffer);
+
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ }
+
+ void trace_stop_cmdline_recording(void);
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index b923d13..ea555b5 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
+
+ static DEFINE_PER_CPU(int, tracing_cpu);
+
+-static DEFINE_SPINLOCK(max_trace_lock);
++static DEFINE_RAW_SPINLOCK(max_trace_lock);
+
+ enum {
+ TRACER_IRQS_OFF = (1 << 1),
+@@ -149,7 +149,7 @@ check_critical_timing(struct trace_array *tr,
+ if (!report_latency(delta))
+ goto out;
+
+- spin_lock_irqsave(&max_trace_lock, flags);
++ raw_spin_lock_irqsave(&max_trace_lock, flags);
+
+ /* check if we are still the max latency */
+ if (!report_latency(delta))
+@@ -173,7 +173,7 @@ check_critical_timing(struct trace_array *tr,
+ max_sequence++;
+
+ out_unlock:
+- spin_unlock_irqrestore(&max_trace_lock, flags);
++ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
+
+ out:
+ data->critical_sequence = max_sequence;
+--
+1.7.1.1
+
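Context for the conversion (an editor's sketch under stated assumptions, not code from the patch): on PREEMPT_RT, spinlock_t is substituted by a sleeping rtmutex-based lock, so a lock taken with interrupts hard-disabled, as reader_lock, tracing_start_lock and max_trace_lock are above, must become a raw_spinlock_t, which always spins with IRQs and preemption off. In outline, the substitution the patch applies looks like this (the struct and function names below are invented for illustration, not identifiers from the patch):

    /* Minimal sketch of the spinlock_t -> raw_spinlock_t conversion.
     * 'demo_buffer' and its functions are hypothetical. */
    #include <linux/spinlock.h>

    struct demo_buffer {
            raw_spinlock_t reader_lock;     /* was: spinlock_t */
    };

    static void demo_buffer_init(struct demo_buffer *b)
    {
            raw_spin_lock_init(&b->reader_lock);    /* was: spin_lock_init() */
    }

    static void demo_read(struct demo_buffer *b)
    {
            unsigned long flags;

            /* Unlike spin_lock_irqsave() on PREEMPT_RT, this never
             * sleeps: it disables IRQs and spins, so it is safe to
             * take from hard-atomic context. */
            raw_spin_lock_irqsave(&b->reader_lock, flags);
            /* ... critical section; must not sleep or allocate ... */
            raw_spin_unlock_irqrestore(&b->reader_lock, flags);
    }

Note that only the lock type and the lock/unlock calls change (spin_lock -> raw_spin_lock, DEFINE_SPINLOCK -> DEFINE_RAW_SPINLOCK, plus the _irqsave/_irqrestore variants); the locking logic and the critical sections themselves are untouched.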