author    Paul Gortmaker <paul.gortmaker@windriver.com>  2013-11-18 16:17:16 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2013-11-18 16:17:16 -0500
commit    3e7f74fa499309fb9cf171a111ef2be5464542e6 (patch)
tree      66a3f52316a729123916b054d85a9b6158c48691
parent    35c34859666c9d577e1204a1d6598929bd95eb79 (diff)
download  3.10-rt-patches-3e7f74fa499309fb9cf171a111ef2be5464542e6.tar.gz

patches-3.10.17-rt12.tar.xz (tag: v3.10.17-rt12)
md5sum: d387c7b152a5a76215b4e3bd6b2af1ad  patches-3.10.17-rt12.tar.xz

Announce:
-----------------
Dear RT folks!

I'm pleased to announce the v3.10.17-rt12 patch set.

This is only a stable update (which fixed the livelock in sem_lock()).

Known issues:

- SLAB support not working

- The cpsw network driver shows some issues.

- bcache is disabled.

- an ancient race (present since we got sleeping spinlocks) where the
  TASK_TRACED state is temporarily replaced while waiting on a rw lock
  and the task can't be traced.

The RT patch against 3.10.17 can be found here:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patch-3.10.17-rt12.patch.xz

The split quilt queue is available at:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.17-rt12.tar.xz

Sebastian
-----------------

http://marc.info/?l=linux-rt-users&m=138299989520258&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
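For context on the refreshed ipc-sem patch below: mainline collects pending
semaphore wakeups on a list and performs them later with preemption disabled,
which is too long a preempt-off section for RT. The patch wakes the sleeper
directly instead. A minimal sketch of the resulting wake_up_sem_queue_prepare(),
reconstructed from the hunks below rather than quoted verbatim from the tree:

static void wake_up_sem_queue_prepare(struct list_head *pt,
				      struct sem_queue *q, int error)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	struct task_struct *p = q->sleeper;

	get_task_struct(p);	/* keep the task alive across the wakeup */
	q->status = error;
	wake_up_process(p);	/* wake directly, no deferred batch */
	put_task_struct(p);
#else
	if (list_empty(pt)) {
		/*
		 * Hold preempt off so that we don't get preempted and
		 * have the wakee busy-wait until we're scheduled out.
		 */
		preempt_disable();
	}
	/* the error code is stashed in q->pid until the real wakeup */
	q->status = IN_WAKEUP;
	q->pid = error;

	list_add_tail(&q->list, pt);
#endif
}

Waking the task immediately keeps the non-preemptible window short, which is
the whole point of sleeping spinlocks on RT.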
-rw-r--r--  patches/ipc-sem-rework-semaphore-wakeups.patch             36
-rw-r--r--  patches/localversion.patch                                  2
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch   15
-rw-r--r--  patches/series                                              1
-rw-r--r--  patches/softirq-local-lock.patch                           15
-rw-r--r--  patches/softirq-split-locks.patch                          35
6 files changed, 40 insertions(+), 64 deletions(-)
diff --git a/patches/ipc-sem-rework-semaphore-wakeups.patch b/patches/ipc-sem-rework-semaphore-wakeups.patch
index fc51db1..1312838 100644
--- a/patches/ipc-sem-rework-semaphore-wakeups.patch
+++ b/patches/ipc-sem-rework-semaphore-wakeups.patch
@@ -33,16 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/ipc/sem.c
+++ b/ipc/sem.c
-@@ -155,7 +155,7 @@ static int sysvipc_sem_proc_show(struct
- * sem_array.sem_pending{,last},
- * sem_array.sem_undo: sem_lock() for read/write
- * sem_undo.proc_next: only "current" is allowed to read/write that field.
-- *
-+ *
- */
-
- #define sc_semmsl sem_ctls[0]
-@@ -498,7 +498,7 @@ static int try_atomic_semop (struct sem_
+@@ -607,7 +607,7 @@ static int perform_atomic_semop(struct s
curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op;
result = curr->semval;
@@ -51,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!sem_op && result)
goto would_block;
-@@ -525,7 +525,7 @@ static int try_atomic_semop (struct sem_
+@@ -634,7 +634,7 @@ static int perform_atomic_semop(struct s
un->semadj[sop->sem_num] -= sop->sem_op;
sop--;
}
@@ -60,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
out_of_range:
-@@ -557,6 +557,13 @@ undo:
+@@ -666,6 +666,13 @@ undo:
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
@@ -74,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
-@@ -568,6 +575,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -677,6 +684,7 @@ static void wake_up_sem_queue_prepare(st
q->pid = error;
list_add_tail(&q->list, pt);
@@ -82,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -581,6 +589,7 @@ static void wake_up_sem_queue_prepare(st
+@@ -690,6 +698,7 @@ static void wake_up_sem_queue_prepare(st
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
@@ -90,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct sem_queue *q, *t;
int did_something;
-@@ -593,6 +602,7 @@ static void wake_up_sem_queue_do(struct
+@@ -702,6 +711,7 @@ static void wake_up_sem_queue_do(struct
}
if (did_something)
preempt_enable();
@@ -98,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-@@ -947,7 +957,7 @@ static int semctl_nolock(struct ipc_name
+@@ -1161,7 +1171,7 @@ static int semctl_nolock(struct ipc_name
err = security_sem_semctl(NULL, cmd);
if (err)
return err;
@@ -107,16 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memset(&seminfo,0,sizeof(seminfo));
seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns;
-@@ -967,7 +977,7 @@ static int semctl_nolock(struct ipc_name
- }
- max_id = ipc_get_maxid(&sem_ids(ns));
- up_read(&sem_ids(ns).rw_mutex);
-- if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
-+ if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
- return -EFAULT;
- return (max_id < 0) ? 0: max_id;
- }
-@@ -1642,7 +1652,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
+@@ -1865,7 +1875,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
/* We need to sleep on this operation, so we put the current
* task into the pending queue and go to sleep.
*/
@@ -125,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
queue.sops = sops;
queue.nsops = nsops;
queue.undo = un;
-@@ -1765,7 +1775,7 @@ int copy_semundo(unsigned long clone_fla
+@@ -2000,7 +2010,7 @@ int copy_semundo(unsigned long clone_fla
return error;
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4e59cb4..6e5d965 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt11
++-rt12
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 77f29e9..9aeedb3 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1186,6 +1186,9 @@ struct task_struct {
+@@ -1185,6 +1185,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -55,19 +55,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
const struct cred __rcu *real_cred; /* objective and real subjective task
--- a/init/main.c
+++ b/init/main.c
-@@ -6,7 +6,7 @@
- * GK 2/5/95 - Changed to support mounting root fs via NFS
- * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
- * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
-- * Simplified starting of init: Michael A. Griffith <grif@acm.org>
-+ * Simplified starting of init: Michael A. Griffith <grif@acm.org>
- */
-
- #define DEBUG /* Enable initcall_debug */
-@@ -74,6 +74,7 @@
- #include <linux/ptrace.h>
+@@ -75,6 +75,7 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
+ #include <linux/random.h>
+#include <linux/posix-timers.h>
#include <asm/io.h>
diff --git a/patches/series b/patches/series
index 801eb95..e00c7e4 100644
--- a/patches/series
+++ b/patches/series
@@ -6,7 +6,6 @@
# UPSTREAM changes queued
############################################################
hpsa-fix-warning-with-smp_processor_id-in-preemptibl.patch
-sparc64-Remove-RWSEM-export-leftovers.patch
genirq-Set-irq-thread-to-RT-priority-on-creation.patch
############################################################
diff --git a/patches/softirq-local-lock.patch b/patches/softirq-local-lock.patch
index 5c496a8..3ae98c1 100644
--- a/patches/softirq-local-lock.patch
+++ b/patches/softirq-local-lock.patch
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1448,6 +1448,7 @@ struct task_struct {
+@@ -1447,6 +1447,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/main.c
+++ b/init/main.c
-@@ -496,6 +496,7 @@ asmlinkage void __init start_kernel(void
+@@ -497,6 +497,7 @@ asmlinkage void __init start_kernel(void
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
@@ -295,15 +295,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
__irq_enter();
-@@ -391,10 +549,14 @@ void irq_enter(void)
+@@ -391,6 +549,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads)
- __do_softirq();
- else
+ if (!force_irqthreads) {
+ /*
+ * We can safely execute softirq on the current stack if
+@@ -404,6 +563,9 @@ static inline void invoke_softirq(void)
+ } else {
wakeup_softirqd();
+ }
+#else
+ wakeup_softirqd();
+#endif
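To make the hunk above easier to follow, here is a sketch of invoke_softirq()
as it reads once the refreshed patch applies on top of 3.10.17 (reconstructed
from the hunk, not copied from the tree): with PREEMPT_RT_FULL, softirqs are
never run from the hardirq exit path at all; ksoftirqd is woken instead so the
work runs in a schedulable, preemptible context.

static inline void invoke_softirq(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
	if (!force_irqthreads) {
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
	} else {
		wakeup_softirqd();
	}
#else /* PREEMPT_RT_FULL */
	/* Always defer to the preemptible ksoftirqd thread on RT. */
	wakeup_softirqd();
#endif
}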
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index db7ede3..250cd67 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1450,6 +1450,7 @@ struct task_struct {
+@@ -1449,6 +1449,7 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
int softirq_nestcnt;
@@ -116,17 +116,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
-static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-
--static void __do_softirq_common(int need_rcu_bh_qs);
++
+void __init softirq_early_init(void)
+{
+ int i;
--void __do_softirq(void)
+-static void __do_softirq_common(int need_rcu_bh_qs);
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ local_irq_lock_init(local_softirq_locks[i]);
+}
-+
+
+-void __do_softirq(void)
+static void lock_softirq(int which)
{
- __do_softirq_common(0);
@@ -135,13 +135,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-void __init softirq_early_init(void)
+static void unlock_softirq(int which)
- {
-- local_irq_lock_init(local_softirq_lock);
++{
+ __local_unlock(&__get_cpu_var(local_softirq_locks[which]));
+}
+
+static void do_single_softirq(int which, int need_rcu_bh_qs)
-+{
+ {
+- local_irq_lock_init(local_softirq_lock);
+ unsigned long old_flags = current->flags;
+
+ current->flags &= ~PF_MEMALLOC;
@@ -261,7 +261,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * disabled. So the cpu can't go away under us.
+ */
+void thread_do_softirq(void)
-+{
+ {
+ if (!in_serving_softirq() && current->softirqs_raised) {
+ current->softirq_nestcnt++;
+ do_current_softirqs(0);
@@ -270,7 +270,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
- {
++{
+ trace_softirq_raise(nr);
+ or_softirq_pending(1UL << nr);
+
@@ -371,23 +371,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-@@ -574,8 +670,14 @@ static inline void invoke_softirq(void)
- __do_softirq();
- else
+@@ -583,8 +679,15 @@ static inline void invoke_softirq(void)
+ } else {
wakeup_softirqd();
+ }
-#else
+#else /* PREEMPT_RT_FULL */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (__this_cpu_read(ksoftirqd) &&
-+ __this_cpu_read(ksoftirqd)->softirqs_raised)
++ __this_cpu_read(ksoftirqd)->softirqs_raised)
++
wakeup_softirqd();
+ local_irq_restore(flags);
#endif
}
-@@ -613,26 +715,6 @@ void irq_exit(void)
+@@ -622,26 +725,6 @@ void irq_exit(void)
rcu_irq_exit();
}
@@ -414,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void raise_softirq(unsigned int nr)
{
unsigned long flags;
-@@ -642,12 +724,6 @@ void raise_softirq(unsigned int nr)
+@@ -651,12 +734,6 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
@@ -427,7 +428,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
-@@ -1091,20 +1167,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
+@@ -1100,20 +1177,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
static int ksoftirqd_should_run(unsigned int cpu)
{
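For orientation on the softirq-split-locks hunks above: the patch replaces the
single local_softirq_lock with one local_irq_lock per softirq vector, so
different softirqs no longer serialize against each other on a CPU. A sketch of
that piece, assembled from the hunks above; the __local_lock() call in
lock_softirq() is inferred by symmetry with unlock_softirq() and is not shown
in the hunks:

static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);

void __init softirq_early_init(void)
{
	int i;

	/* One lock per softirq vector instead of one lock for all of them. */
	for (i = 0; i < NR_SOFTIRQS; i++)
		local_irq_lock_init(local_softirq_locks[i]);
}

static void lock_softirq(int which)
{
	/* inferred counterpart of unlock_softirq() below */
	__local_lock(&__get_cpu_var(local_softirq_locks[which]));
}

static void unlock_softirq(int which)
{
	__local_unlock(&__get_cpu_var(local_softirq_locks[which]));
}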