aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordavem <davem>2001-12-06 00:16:11 +0000
committerdavem <davem>2001-12-06 00:16:11 +0000
commita025606f42044ca068f7601e092f5492a3cec87a (patch)
tree6a5a8b92a9ae489a1ef9775beea18215a090468c
parent7550a471942350daf932d4121e0adbc95d882771 (diff)
downloadnetdev-vger-cvs-a025606f42044ca068f7601e092f5492a3cec87a.tar.gz
Perform resched+signal checks atomically
with PSTATE_IE disabled, and keep it disabled all the way back to userspace. Also make register l6 handling more consistent.
-rw-r--r--arch/sparc64/kernel/rtrap.S130
1 files changed, 101 insertions, 29 deletions
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 9ac6a30e8..f06dd499c 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -1,4 +1,4 @@
-/* $Id: rtrap.S,v 1.56 2001-10-13 00:14:34 kanoj Exp $
+/* $Id: rtrap.S,v 1.57 2001-12-06 00:16:11 davem Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -18,14 +18,13 @@
#define RTRAP_PSTATE_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-#if 0
-#define RTRAP_CHECK call rtrap_check; add %sp, (STACK_BIAS+REGWIN_SZ), %o0;
-#else
-#define RTRAP_CHECK
-#endif
+ /* Register %l6 keeps track of whether we are returning
+ * from a system call or not. It is cleared if we call
+ * do_signal, and it must not be otherwise modified until
+ * we fully commit to returning to userspace.
+ */
.text
-
.align 32
__handle_softirq:
call do_softirq
@@ -34,42 +33,101 @@ __handle_softirq:
nop
__handle_preemption:
call schedule
- nop
+ wrpr %g0, RTRAP_PSTATE, %pstate
ba,pt %xcc, __handle_preemption_continue
- nop
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+
__handle_user_windows:
- wrpr %g0, RTRAP_PSTATE, %pstate
call fault_in_user_windows
- nop
+ wrpr %g0, RTRAP_PSTATE, %pstate
ba,pt %xcc, __handle_user_windows_continue
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+ /* Redo sched+sig checks */
+ ldx [%g6 + AOFF_task_need_resched], %l0
+ brz,pt %l0, 1f
+ nop
+ call schedule
+
+ wrpr %g0, RTRAP_PSTATE, %pstate
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+1: lduw [%g6 + AOFF_task_sigpending], %l0
+ brz,pt %l0, __handle_user_windows_continue
nop
+ clr %o0
+ mov %l5, %o2
+ mov %l6, %o3
+
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+ call do_signal
+ wrpr %g0, RTRAP_PSTATE, %pstate
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+ clr %l6
+ /* Signal delivery can modify pt_regs tstate, so we must
+ * reload it.
+ */
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+ sethi %hi(0xf << 20), %l4
+ and %l1, %l4, %l4
+
+ ba,pt %xcc, __handle_user_windows_continue
+ andn %l1, %l4, %l1
__handle_perfctrs:
- /* Don't forget to preserve user window invariants. */
- wrpr %g0, RTRAP_PSTATE, %pstate
call update_perfctrs
- nop
+ wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
- brz,pt %o2, __handle_perfctrs_continue
- sethi %hi(TSTATE_PEF), %l6
- wrpr %g0, RTRAP_PSTATE, %pstate
+ brz,pt %o2, 1f
+ nop
+ /* Redo userwin+sched+sig checks */
call fault_in_user_windows
+ wrpr %g0, RTRAP_PSTATE, %pstate
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+1: ldx [%g6 + AOFF_task_need_resched], %l0
+ brz,pt %l0, 1f
nop
+ call schedule
+ wrpr %g0, RTRAP_PSTATE, %pstate
+
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+1: lduw [%g6 + AOFF_task_sigpending], %l0
+ brz,pt %l0, __handle_perfctrs_continue
+ sethi %hi(TSTATE_PEF), %o0
+ clr %o0
+ mov %l5, %o2
+ mov %l6, %o3
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+
+ call do_signal
+ wrpr %g0, RTRAP_PSTATE, %pstate
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+ clr %l6
+ /* Signal delivery can modify pt_regs tstate, so we must
+ * reload it.
+ */
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+ sethi %hi(0xf << 20), %l4
+ and %l1, %l4, %l4
+ andn %l1, %l4, %l1
+
ba,pt %xcc, __handle_perfctrs_continue
- nop
+ sethi %hi(TSTATE_PEF), %o0
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
+ sethi %hi(TSTATE_PEF), %o0
be,a,pn %icc, __handle_userfpu_continue
- andn %l1, %l6, %l1
+ andn %l1, %o0, %l1
ba,a,pt %xcc, __handle_userfpu_continue
+
__handle_signal:
clr %o0
mov %l5, %o2
mov %l6, %o3
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
call do_signal
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
+ wrpr %g0, RTRAP_PSTATE, %pstate
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
clr %l6
/* Signal delivery can modify pt_regs tstate, so we must
@@ -99,29 +157,43 @@ __handle_softirq_continue:
and %l1, %l4, %l4
bne,pn %icc, to_kernel
andn %l1, %l4, %l1
-to_user: ldx [%g6 + AOFF_task_need_resched], %l0
- brnz,pn %l0, __handle_preemption
+ /* We must hold IRQs off and atomically test schedule+signal
+ * state, then hold them off all the way back to userspace.
+ * If we are returning to kernel, none of this matters.
+ *
+ * If we do not do this, there is a window where we would do
+ * the tests, later the signal/resched event arrives but we do
+ * not process it since we are still in kernel mode. It would
+ * take until the next local IRQ before the signal/resched
+ * event would be handled.
+ *
+ * This also means that if we have to deal with performance
+ * counters or user windows, we have to redo all of these
+ * sched+signal checks with IRQs disabled.
+ */
+to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_preemption_continue:
+ ldx [%g6 + AOFF_task_need_resched], %l0
+ brnz,pn %l0, __handle_preemption
lduw [%g6 + AOFF_task_sigpending], %l0
brnz,pn %l0, __handle_signal
nop
__handle_signal_continue:
-check_user_wins:
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
brnz,pn %o2, __handle_user_windows
- sethi %hi(TSTATE_PEF), %l6
-
+ nop
__handle_user_windows_continue:
- RTRAP_CHECK
ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
andcc %l5, SPARC_FLAG_PERFCTR, %g0
+ sethi %hi(TSTATE_PEF), %o0
bne,pn %xcc, __handle_perfctrs
__handle_perfctrs_continue:
- andcc %l1, %l6, %g0
+ andcc %l1, %o0, %g0
+
+	/* This fpdepth clear is necessary for non-syscall rtraps only */
bne,pn %xcc, __handle_userfpu
- stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
+ stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
__handle_userfpu_continue:
rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1