author    Paul Gortmaker <paul.gortmaker@windriver.com>  2011-01-29 17:20:10 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2011-01-29 17:20:10 -0500
commit    428625fae0558c9cea777e9e5c93f0360cebae2e (patch)
tree      96579e85ef14b22a5f59b30e85254845f917b1cf
parent    1b126ef0737daf5fe9a0a708357c352b8b0e056e (diff)
download  rt-patches-428625fae0558c9cea777e9e5c93f0360cebae2e.tar.gz
rcu rawlock patch is upstream commit 1304afb225288a2e250d6a7495462c28e5509cbb
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  rcu-upgrade-locks-to-rawlocks.patch   688
-rw-r--r--  series                                  1
2 files changed, 0 insertions(+), 689 deletions(-)
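
For context: the patch removed below converted the locks guarding RCU's rcu_node
and rcu_state structures from spinlock_t to raw_spinlock_t. On PREEMPT_RT,
spinlock_t becomes a sleeping lock, so locks taken with interrupts disabled must
remain true spinning locks and use the raw_* lock API. A minimal sketch of the
conversion pattern follows; the names (example_node, example_clear_bit) are
illustrative and not taken from the patch:

	#include <linux/spinlock.h>

	struct example_node {
		raw_spinlock_t lock;		/* was: spinlock_t lock; */
		unsigned long qsmask;
	};

	/* Static initialization switches to the raw initializer,
	 * mirroring the RCU_STATE_INITIALIZER hunk below. */
	static struct example_node example_node = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&example_node.lock),
		.qsmask = 0,
	};

	static void example_clear_bit(unsigned long mask)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
		raw_spin_lock_irqsave(&example_node.lock, flags);
		example_node.qsmask &= ~mask;
		raw_spin_unlock_irqrestore(&example_node.lock, flags);
	}
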
diff --git a/rcu-upgrade-locks-to-rawlocks.patch b/rcu-upgrade-locks-to-rawlocks.patch
deleted file mode 100644
index d7a54e2..0000000
--- a/rcu-upgrade-locks-to-rawlocks.patch
+++ /dev/null
@@ -1,688 +0,0 @@
-From 4c57528251ea864748db8f66b6632be3ef787cc6 Mon Sep 17 00:00:00 2001
-From: Paul Gortmaker <paul.gortmaker@windriver.com>
-Date: Mon, 17 Jan 2011 20:44:59 -0500
-Subject: [PATCH] rcu: upgrade locks to rawlocks
-
-This is one of several extractions from the merge up to 33-rc8.
-
-Easiest to see in the tip git repo with:
-
- git diff 5f854cfc024622e4aae14d7cf422f6ff86278688^2 \
- 5f854cfc024622e4aae14d7cf422f6ff86278688 kernel/rcutree_plugin.h
-
-i.e. show the difference between the 2nd parent of the merge (RT) and
-the file looked like after the merge. Same for the other rcu files.
-
-You can find the origin of this change in the tip merge commit:
-
- commit 5f854cfc024622e4aae14d7cf422f6ff86278688
- Merge: cc24da0 4ec62b2
- Author: Thomas Gleixner <tglx@linutronix.de>
- Date: Sun Feb 21 20:17:22 2010 +0100
-
- Forward to 2.6.33-rc8
-
- Merge branch 'linus' into rt/head with a pile of conflicts.
-
- Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-Normally there are not significant changes/additions in a merge commit that
-are not from any other "normal" commit. But in this case there are, so
-break them out into separate explicit commits.
-
-Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-
-diff --git a/kernel/rcutree.c b/kernel/rcutree.c
-index 53ae959..586ba1d 100644
---- a/kernel/rcutree.c
-+++ b/kernel/rcutree.c
-@@ -66,11 +66,11 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
- .signaled = RCU_GP_IDLE, \
- .gpnum = -300, \
- .completed = -300, \
-- .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
-+ .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \
- .orphan_cbs_list = NULL, \
- .orphan_cbs_tail = &name.orphan_cbs_list, \
- .orphan_qlen = 0, \
-- .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
-+ .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \
- .n_force_qs = 0, \
- .n_force_qs_ngp = 0, \
- }
-@@ -439,10 +439,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
-
- /* Only let one CPU complain about others per time interval. */
-
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- delta = jiffies - rsp->jiffies_stall;
- if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
- rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
-@@ -452,7 +452,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
- * due to CPU offlining.
- */
- rcu_print_task_stall(rnp);
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
- /* OK, time to rat on our buddy... */
-
-@@ -481,11 +481,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
- smp_processor_id(), jiffies - rsp->gp_start);
- trigger_all_cpu_backtrace();
-
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- if ((long)(jiffies - rsp->jiffies_stall) >= 0)
- rsp->jiffies_stall =
- jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
- set_need_resched(); /* kick ourselves to get things going. */
- }
-@@ -545,12 +545,12 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
- local_irq_save(flags);
- rnp = rdp->mynode;
- if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
-- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
-+ !raw_spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
- local_irq_restore(flags);
- return;
- }
- __note_new_gpnum(rsp, rnp, rdp);
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /*
-@@ -609,12 +609,12 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
- local_irq_save(flags);
- rnp = rdp->mynode;
- if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
-- !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
-+ !raw_spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
- local_irq_restore(flags);
- return;
- }
- __rcu_process_gp_end(rsp, rnp, rdp);
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /*
-@@ -661,10 +661,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
-
- if (!cpu_needs_another_gp(rsp, rdp)) {
- if (rnp->completed == rsp->completed) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-
- /*
- * Propagate new ->completed value to rcu_node structures
-@@ -672,9 +672,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
- * of the next grace period to process their callbacks.
- */
- rcu_for_each_node_breadth_first(rsp, rnp) {
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->completed = rsp->completed;
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
- local_irq_restore(flags);
- return;
-@@ -695,15 +695,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
- rnp->completed = rsp->completed;
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
- rcu_start_gp_per_cpu(rsp, rnp, rdp);
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
-
-- spin_unlock(&rnp->lock); /* leave irqs disabled. */
-+ raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
-
-
- /* Exclude any concurrent CPU-hotplug operations. */
-- spin_lock(&rsp->onofflock); /* irqs already disabled. */
-+ raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
-
- /*
- * Set the quiescent-state-needed bits in all the rcu_node
-@@ -723,21 +723,21 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
- * irqs disabled.
- */
- rcu_for_each_node_breadth_first(rsp, rnp) {
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rcu_preempt_check_blocked_tasks(rnp);
- rnp->qsmask = rnp->qsmaskinit;
- rnp->gpnum = rsp->gpnum;
- rnp->completed = rsp->completed;
- if (rnp == rdp->mynode)
- rcu_start_gp_per_cpu(rsp, rnp, rdp);
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
-
- rnp = rcu_get_root(rsp);
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-- spin_unlock_irqrestore(&rsp->onofflock, flags);
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- }
-
- /*
-@@ -776,14 +776,14 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
- if (!(rnp->qsmask & mask)) {
-
- /* Our bit has already been cleared, so done. */
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
- rnp->qsmask &= ~mask;
- if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
-
- /* Other bits still set at this level, so done. */
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
- mask = rnp->grpmask;
-@@ -793,10 +793,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
-
- break;
- }
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- rnp_c = rnp;
- rnp = rnp->parent;
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- WARN_ON_ONCE(rnp_c->qsmask);
- }
-
-@@ -825,7 +825,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
- struct rcu_node *rnp;
-
- rnp = rdp->mynode;
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- if (lastcomp != rnp->completed) {
-
- /*
-@@ -837,12 +837,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
- * race occurred.
- */
- rdp->passed_quiesc = 0; /* try again later! */
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return;
- }
- mask = rdp->grpmask;
- if ((rnp->qsmask & mask) == 0) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- } else {
- rdp->qs_pending = 0;
-
-@@ -906,7 +906,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
-
- if (rdp->nxtlist == NULL)
- return; /* irqs disabled, so comparison is stable. */
-- spin_lock(&rsp->onofflock); /* irqs already disabled. */
-+ raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
- *rsp->orphan_cbs_tail = rdp->nxtlist;
- rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
- rdp->nxtlist = NULL;
-@@ -914,7 +914,7 @@ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
- rdp->nxttail[i] = &rdp->nxtlist;
- rsp->orphan_qlen += rdp->qlen;
- rdp->qlen = 0;
-- spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
- }
-
- /*
-@@ -925,10 +925,10 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
- unsigned long flags;
- struct rcu_data *rdp;
-
-- spin_lock_irqsave(&rsp->onofflock, flags);
-+ raw_spin_lock_irqsave(&rsp->onofflock, flags);
- rdp = rsp->rda[smp_processor_id()];
- if (rsp->orphan_cbs_list == NULL) {
-- spin_unlock_irqrestore(&rsp->onofflock, flags);
-+ raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- return;
- }
- *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
-@@ -937,7 +937,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
- rsp->orphan_cbs_list = NULL;
- rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
- rsp->orphan_qlen = 0;
-- spin_unlock_irqrestore(&rsp->onofflock, flags);
-+ raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- }
-
- /*
-@@ -953,23 +953,23 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
- struct rcu_node *rnp;
-
- /* Exclude any attempts to start a new grace period. */
-- spin_lock_irqsave(&rsp->onofflock, flags);
-+ raw_spin_lock_irqsave(&rsp->onofflock, flags);
-
- /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
- rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
- mask = rdp->grpmask; /* rnp->grplo is constant. */
- do {
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->qsmaskinit &= ~mask;
- if (rnp->qsmaskinit != 0) {
- if (rnp != rdp->mynode)
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- break;
- }
- if (rnp == rdp->mynode)
- need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
- else
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- mask = rnp->grpmask;
- rnp = rnp->parent;
- } while (rnp != NULL);
-@@ -980,12 +980,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
- * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
- * held leads to deadlock.
- */
-- spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
- rnp = rdp->mynode;
- if (need_report & RCU_OFL_TASKS_NORM_GP)
- rcu_report_unblock_qs_rnp(rnp, flags);
- else
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- if (need_report & RCU_OFL_TASKS_EXP_GP)
- rcu_report_exp_rnp(rsp, rnp);
-
-@@ -1158,13 +1158,13 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
-
- rcu_for_each_leaf_node(rsp, rnp) {
- mask = 0;
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- if (rnp->completed != lastcomp) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return 1;
- }
- if (rnp->qsmask == 0) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- continue;
- }
- cpu = rnp->grplo;
-@@ -1179,7 +1179,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
- rcu_report_qs_rnp(mask, rsp, rnp, flags);
- continue;
- }
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
- return 0;
- }
-@@ -1198,7 +1198,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
-
- if (!rcu_gp_in_progress(rsp))
- return; /* No grace period in progress, nothing to force. */
-- if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
-+ if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
- rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
- return; /* Someone else is already on the job. */
- }
-@@ -1206,16 +1206,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
- (long)(rsp->jiffies_force_qs - jiffies) >= 0)
- goto unlock_ret; /* no emergency and done recently. */
- rsp->n_force_qs++;
-- spin_lock(&rnp->lock);
-+ raw_spin_lock(&rnp->lock);
- lastcomp = rsp->gpnum - 1;
- signaled = rsp->signaled;
- rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
- if(!rcu_gp_in_progress(rsp)) {
- rsp->n_force_qs_ngp++;
-- spin_unlock(&rnp->lock);
-+ raw_spin_unlock(&rnp->lock);
- goto unlock_ret; /* no GP in progress, time updated. */
- }
-- spin_unlock(&rnp->lock);
-+ raw_spin_unlock(&rnp->lock);
- switch (signaled) {
- case RCU_GP_IDLE:
- case RCU_GP_INIT:
-@@ -1237,7 +1237,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
-
- /* Update state, record completion counter. */
- forcenow = 0;
-- spin_lock(&rnp->lock);
-+ raw_spin_lock(&rnp->lock);
- if (lastcomp + 1 == rsp->gpnum &&
- lastcomp == rsp->completed &&
- rsp->signaled == signaled) {
-@@ -1245,7 +1245,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
- rsp->completed_fqs = lastcomp;
- forcenow = signaled == RCU_SAVE_COMPLETED;
- }
-- spin_unlock(&rnp->lock);
-+ raw_spin_unlock(&rnp->lock);
- if (!forcenow)
- break;
- /* fall into next case. */
-@@ -1262,7 +1262,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
- break;
- }
- unlock_ret:
-- spin_unlock_irqrestore(&rsp->fqslock, flags);
-+ raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
- }
-
- #else /* #ifdef CONFIG_SMP */
-@@ -1304,7 +1304,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
-
- /* Does this CPU require a not-yet-started grace period? */
- if (cpu_needs_another_gp(rsp, rdp)) {
-- spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
-+ raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
- rcu_start_gp(rsp, flags); /* releases above lock */
- }
-
-@@ -1369,7 +1369,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
- unsigned long nestflag;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
-
-- spin_lock_irqsave(&rnp_root->lock, nestflag);
-+ raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
- rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
- }
-
-@@ -1659,7 +1659,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
- struct rcu_node *rnp = rcu_get_root(rsp);
-
- /* Set up local state, ensuring consistent view of global state. */
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
-@@ -1669,7 +1669,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
- rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
- #endif /* #ifdef CONFIG_NO_HZ */
- rdp->cpu = cpu;
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /*
-@@ -1687,7 +1687,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
- struct rcu_node *rnp = rcu_get_root(rsp);
-
- /* Set up local state, ensuring consistent view of global state. */
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- rdp->passed_quiesc = 0; /* We could be racing with new GP, */
- rdp->qs_pending = 1; /* so set up to respond to current GP. */
- rdp->beenonline = 1; /* We have now been online. */
-@@ -1695,7 +1695,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
- rdp->qlen_last_fqs_check = 0;
- rdp->n_force_qs_snap = rsp->n_force_qs;
- rdp->blimit = blimit;
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-
- /*
- * A new grace period might start here. If so, we won't be part
-@@ -1703,14 +1703,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
- */
-
- /* Exclude any attempts to start a new GP on large systems. */
-- spin_lock(&rsp->onofflock); /* irqs already disabled. */
-+ raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
-
- /* Add CPU to rcu_node bitmasks. */
- rnp = rdp->mynode;
- mask = rdp->grpmask;
- do {
- /* Exclude any attempts to start a new GP on small systems. */
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->qsmaskinit |= mask;
- mask = rnp->grpmask;
- if (rnp == rdp->mynode) {
-@@ -1718,11 +1718,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
- rdp->completed = rnp->completed;
- rdp->passed_quiesc_completed = rnp->completed - 1;
- }
-- spin_unlock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
- rnp = rnp->parent;
- } while (rnp != NULL && !(rnp->qsmaskinit & mask));
-
-- spin_unlock_irqrestore(&rsp->onofflock, flags);
-+ raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- }
-
- static void __cpuinit rcu_online_cpu(int cpu)
-@@ -1823,7 +1823,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
- cpustride *= rsp->levelspread[i];
- rnp = rsp->level[i];
- for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-- spin_lock_init(&rnp->lock);
-+ raw_spin_lock_init(&rnp->lock);
- lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
- rnp->gpnum = 0;
- rnp->qsmask = 0;
-diff --git a/kernel/rcutree.h b/kernel/rcutree.h
-index d2a0046..4613de1 100644
---- a/kernel/rcutree.h
-+++ b/kernel/rcutree.h
-@@ -90,7 +90,7 @@ struct rcu_dynticks {
- * Definition for node within the RCU grace-period-detection hierarchy.
- */
- struct rcu_node {
-- spinlock_t lock; /* Root rcu_node's lock protects some */
-+ raw_spinlock_t lock; /* Root rcu_node's lock protects some */
- /* rcu_state fields as well as following. */
- long gpnum; /* Current grace period for this node. */
- /* This will either be equal to or one */
-@@ -282,7 +282,7 @@ struct rcu_state {
-
- /* End of fields guarded by root rcu_node's lock. */
-
-- spinlock_t onofflock; /* exclude on/offline and */
-+ raw_spinlock_t onofflock; /* exclude on/offline and */
- /* starting new GP. Also */
- /* protects the following */
- /* orphan_cbs fields. */
-@@ -292,7 +292,7 @@ struct rcu_state {
- /* going offline. */
- struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
- long orphan_qlen; /* Number of orphaned cbs. */
-- spinlock_t fqslock; /* Only one task forcing */
-+ raw_spinlock_t fqslock; /* Only one task forcing */
- /* quiescent states. */
- long completed_fqs; /* Value of completed @ snap. */
- /* Protected by fqslock. */
-diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
-index 37fbccd..0533408 100644
---- a/kernel/rcutree_plugin.h
-+++ b/kernel/rcutree_plugin.h
-@@ -102,7 +102,7 @@ static void rcu_preempt_note_context_switch(int cpu)
- /* Possibly blocking in an RCU read-side critical section. */
- rdp = rcu_preempt_state.rda[cpu];
- rnp = rdp->mynode;
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
- t->rcu_blocked_node = rnp;
-
-@@ -123,7 +123,7 @@ static void rcu_preempt_note_context_switch(int cpu)
- WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
- phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
- list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /*
-@@ -180,7 +180,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- struct rcu_node *rnp_p;
-
- if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- return; /* Still need more quiescent states! */
- }
-
-@@ -197,8 +197,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-
- /* Report up the rest of the hierarchy. */
- mask = rnp->grpmask;
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-- spin_lock(&rnp_p->lock); /* irqs already disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
- rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
- }
-
-@@ -248,10 +248,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
- */
- for (;;) {
- rnp = t->rcu_blocked_node;
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- if (rnp == t->rcu_blocked_node)
- break;
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
- empty = !rcu_preempted_readers(rnp);
- empty_exp = !rcu_preempted_readers_exp(rnp);
-@@ -265,7 +265,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
- * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
- */
- if (empty)
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- else
- rcu_report_unblock_qs_rnp(rnp, flags);
-
-@@ -388,11 +388,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
- lp_root = &rnp_root->blocked_tasks[i];
- while (!list_empty(lp)) {
- tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
-- spin_lock(&rnp_root->lock); /* irqs already disabled */
-+ raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
- list_del(&tp->rcu_node_entry);
- tp->rcu_blocked_node = rnp_root;
- list_add(&tp->rcu_node_entry, lp_root);
-- spin_unlock(&rnp_root->lock); /* irqs remain disabled */
-+ raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
- }
- }
- return retval;
-@@ -516,7 +516,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
- unsigned long flags;
- unsigned long mask;
-
-- spin_lock_irqsave(&rnp->lock, flags);
-+ raw_spin_lock_irqsave(&rnp->lock, flags);
- for (;;) {
- if (!sync_rcu_preempt_exp_done(rnp))
- break;
-@@ -525,12 +525,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
- break;
- }
- mask = rnp->grpmask;
-- spin_unlock(&rnp->lock); /* irqs remain disabled */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
- rnp = rnp->parent;
-- spin_lock(&rnp->lock); /* irqs already disabled */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled */
- rnp->expmask &= ~mask;
- }
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- /*
-@@ -545,11 +545,11 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
- {
- int must_wait;
-
-- spin_lock(&rnp->lock); /* irqs already disabled */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled */
- list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
- list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
- must_wait = rcu_preempted_readers_exp(rnp);
-- spin_unlock(&rnp->lock); /* irqs remain disabled */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
- if (!must_wait)
- rcu_report_exp_rnp(rsp, rnp);
- }
-@@ -594,13 +594,13 @@ void synchronize_rcu_expedited(void)
- /* force all RCU readers onto blocked_tasks[]. */
- synchronize_sched_expedited();
-
-- spin_lock_irqsave(&rsp->onofflock, flags);
-+ raw_spin_lock_irqsave(&rsp->onofflock, flags);
-
- /* Initialize ->expmask for all non-leaf rcu_node structures. */
- rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-- spin_lock(&rnp->lock); /* irqs already disabled. */
-+ raw_spin_lock(&rnp->lock); /* irqs already disabled. */
- rnp->expmask = rnp->qsmaskinit;
-- spin_unlock(&rnp->lock); /* irqs remain disabled. */
-+ raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
- }
-
- /* Snapshot current state of ->blocked_tasks[] lists. */
-@@ -609,7 +609,7 @@ void synchronize_rcu_expedited(void)
- if (NUM_RCU_NODES > 1)
- sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
-
-- spin_unlock_irqrestore(&rsp->onofflock, flags);
-+ raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
-
- /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
- rnp = rcu_get_root(rsp);
-@@ -734,7 +734,7 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
- /* Because preemptible RCU does not exist, no quieting of tasks. */
- static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
- {
-- spin_unlock_irqrestore(&rnp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
- }
-
- #endif /* #ifdef CONFIG_HOTPLUG_CPU */
---
-1.7.1.1
-
diff --git a/series b/series
index 355a6e8..2245a1f 100644
--- a/series
+++ b/series
@@ -319,7 +319,6 @@ make-takeover_delayed_drop-depend-on-HOTPLUG_CPU.patch
cond_resched_softirq-mask-SOFTIRQ_OFFSET-in-__might_.patch
blk-use-raw_smp_processor_id-in-blk_cpu_to_group.patch
kvm-use-raw_smp_processor_id-in-make_all_cpus_reques.patch
-rcu-upgrade-locks-to-rawlocks.patch
ratelimit-raw-locks-on-state-and-name.patch
sparc-devtree_lock-as-raw.patch
rt-trash-the-Linux-Semaphores-implemented-via-RT-mut.patch
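
The dropped patch also covered a second pattern, seen in the note_new_gpnum()
and rcu_process_gp_end() hunks above: interrupts are disabled first, the node
lock is only tried, and the caller backs off on contention rather than
spinning. A sketch of that pattern, again with illustrative names rather than
code from the patch:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);
	static unsigned long example_state;

	static void example_try_update(unsigned long new)
	{
		unsigned long flags;

		local_irq_save(flags);
		if (!raw_spin_trylock(&example_lock)) {	/* was: spin_trylock() */
			local_irq_restore(flags);	/* contended: retry later */
			return;
		}
		example_state = new;
		/* releases the lock and restores the saved irq state */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}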