/*
* linux/arch/arm/kernel/entry-common.S
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include "entry-header.S"
/*
* We rely on the fact that R0 is at the bottom of the stack (due to
* slow/fast restore user regs).
*/
#if S_R0 != 0
#error "Please fix"
#endif
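/*
 * (It matters because the slow path's "str r0, [sp, #S_R0+S_OFF]!"
 * below relies on the writeback landing sp back on the base of the
 * saved pt_regs, which only works out if S_R0 is zero.)
 */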
/*
* Our do_softirq out of line code. See include/asm-arm/softirq.h for
* the calling assembly.
*/
ENTRY(__do_softirq)
stmfd sp!, {r0 - r3, ip, lr}
bl do_softirq
ldmfd sp!, {r0 - r3, ip, pc}
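/*
 * Note that __do_softirq itself saves everything a C call may
 * clobber (r0 - r3, ip, lr), so the calling assembly can branch here
 * with those registers live; popping the saved lr straight into pc
 * is the return.
 */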
.align 5
/*
* This is the fast syscall return path. We do as little as
* possible here, and this includes saving r0 back into the SVC
* stack.
*/
ret_fast_syscall:
ldr r1, [tsk, #TSK_NEED_RESCHED]
ldr r2, [tsk, #TSK_SIGPENDING]
teq r1, #0 @ need_resched || sigpending
teqeq r2, #0
bne slow
fast_restore_user_regs
/*
* Ok, we need to do extra processing, enter the slow path.
*/
slow: str r0, [sp, #S_R0+S_OFF]! @ returned r0
b 1f
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
*/
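/*
 * On "why": ret_from_fork below enters with why = 1 (a real syscall
 * return), and the SWI dispatcher clears it once a number turns out
 * not to be a Linux syscall; __do_signal simply hands it on to
 * do_signal as the 'syscall' argument.
 */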
reschedule:
bl SYMBOL_NAME(schedule)
ENTRY(ret_to_user)
ret_slow_syscall:
ldr r1, [tsk, #TSK_NEED_RESCHED]
ldr r2, [tsk, #TSK_SIGPENDING]
1: teq r1, #0 @ need_resched => schedule()
bne reschedule
teq r2, #0 @ sigpending => do_signal()
blne __do_signal
restore_user_regs
__do_signal:
mov r0, #0 @ NULL 'oldset'
mov r1, sp @ 'regs'
mov r2, why @ 'syscall'
b SYMBOL_NAME(do_signal) @ note the bl above sets lr
/*
* This is how we return from a fork. __switch_to will be calling us
* with r0 pointing at the previous task that was running (ready for
* calling schedule_tail).
*/
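/*
 * If the child is itself being traced, we also report a syscall exit
 * (IP = 1) before returning: as far as the tracer is concerned, the
 * child has just returned from fork.
 */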
ENTRY(ret_from_fork)
bl SYMBOL_NAME(schedule_tail)
get_current_task tsk
ldr ip, [tsk, #TSK_PTRACE] @ check for syscall tracing
mov why, #1
tst ip, #PT_TRACESYS @ are we tracing syscalls?
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
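/*
 * calls.S is included twice: this first pass is what lets the SWI
 * handler below see NR_syscalls; the second include, under
 * sys_call_table, lays down the table entries themselves.
 */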
#include "calls.S"
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
*/
/* If we're optimising for StrongARM the resulting code won't
run on an ARM7 and we can save a couple of instructions.
--pb */
#ifdef CONFIG_CPU_ARM710
.macro arm710_bug_check, instr, temp
and \temp, \instr, #0x0f000000 @ check for SWI
teq \temp, #0x0f000000
bne .Larm700bug
.endm
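/*
 * The trapped instruction wasn't really a SWI: put back the caller's
 * PSR and r0 - lr, record the rewound address through r8, drop the
 * exception frame and return as if nothing had happened.
 */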
.Larm700bug:
ldr r0, [sp, #S_PSR] @ Get calling cpsr
sub lr, lr, #4
str lr, [r8]
msr spsr, r0
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0 @ no-op; don't access banked regs right after ldm {..}^
ldr lr, [sp, #S_PC] @ Get PC
add sp, sp, #S_FRAME_SIZE
movs pc, lr
#else
.macro arm710_bug_check, instr, temp
.endm
#endif
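/*
 * How the dispatch below works: the SWI comment field encodes
 * (OS_NUMBER << 20) | call number. Masking off the 0xef op-code byte
 * and eor'ing with OS_NUMBER << 20 leaves a Linux call as a small
 * index that passes the NR_syscalls check; anything else (the
 * ARM-private calls included) falls through to label 2, which puts
 * the OS number back and lets arm_syscall or sys_ni_syscall deal
 * with it.
 */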
.align 5
ENTRY(vector_swi)
save_user_regs
zero_fp
get_scno
arm710_bug_check scno, ip
#ifdef CONFIG_ALIGNMENT_TRAP
ldr ip, __cr_alignment
ldr ip, [ip]
mcr p15, 0, ip, c1, c0 @ update control register
#endif
enable_irqs ip
str r4, [sp, #-S_OFF]! @ push fifth arg
get_current_task tsk
ldr ip, [tsk, #TSK_PTRACE] @ check for syscall tracing
bic scno, scno, #0xff000000 @ mask off SWI op-code
eor scno, scno, #OS_NUMBER << 20 @ check OS number
adr tbl, sys_call_table @ load syscall table pointer
tst ip, #PT_TRACESYS @ are we tracing syscalls?
bne __sys_trace
adrsvc al, lr, ret_fast_syscall @ return address
cmp scno, #NR_syscalls @ check upper syscall limit
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
2: mov why, #0 @ no longer a real syscall
cmp scno, #ARMSWI_OFFSET
eor r0, scno, #OS_NUMBER << 20 @ put OS number back
bcs SYMBOL_NAME(arm_syscall)
b SYMBOL_NAME(sys_ni_syscall) @ not private func
/*
* This is the really slow path. We're going to be doing
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
add r1, sp, #S_OFF
mov r0, #0 @ trace entry [IP = 0]
bl SYMBOL_NAME(syscall_trace)
adrsvc al, lr, __sys_trace_return @ return address
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
ldmccia r1, {r0 - r3} @ have to reload r0 - r3
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
b 2b
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
__cr_alignment:
.word SYMBOL_NAME(cr_alignment)
#endif
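/*
 * __cr_alignment remembers where cr_alignment lives so the mcr in
 * vector_swi can refresh the CP15 control register (and hence the
 * alignment trap setting) on each syscall entry.
 */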
.type sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
/*============================================================================
* Special system call wrappers
*/
@ r0 = syscall number
@ r8 = syscall table
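@
@ sys_syscall is the indirect syscall: the real call number arrives
@ in r0, so each argument shifts down one register, and the original
@ r5/r6 spill to the stack as the fifth and sixth arguments.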
.type sys_syscall, #function
SYMBOL_NAME(sys_syscall):
eor scno, r0, #OS_NUMBER << 20
cmp scno, #NR_syscalls @ check range
stmleia sp, {r5, r6} @ shuffle args
movle r0, r1
movle r1, r2
movle r2, r3
movle r3, r4
ldrle pc, [tbl, scno, lsl #2]
b sys_ni_syscall
sys_fork_wrapper:
add r0, sp, #S_OFF
b SYMBOL_NAME(sys_fork)
sys_vfork_wrapper:
add r0, sp, #S_OFF
b SYMBOL_NAME(sys_vfork)
sys_execve_wrapper:
add r3, sp, #S_OFF
b SYMBOL_NAME(sys_execve)
sys_clone_wrapper:
add r2, sp, #S_OFF
b SYMBOL_NAME(sys_clone)
sys_sigsuspend_wrapper:
add r3, sp, #S_OFF
b SYMBOL_NAME(sys_sigsuspend)
sys_rt_sigsuspend_wrapper:
add r2, sp, #S_OFF
b SYMBOL_NAME(sys_rt_sigsuspend)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
b SYMBOL_NAME(sys_sigreturn)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
b SYMBOL_NAME(sys_rt_sigreturn)
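@ do_sigaltstack also needs the user's saved sp as its third argument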
sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack
/*
* Note: off_4k (r5) is always units of 4K. If we can't do the requested
* offset, we return EINVAL.
*/
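/*
 * A worked example (assuming 16K pages, i.e. PAGE_SHIFT == 14, so
 * PGOFF_MASK covers the low two bits): off_4k = 8 means a 32K byte
 * offset; its low bits are clear, so "lsr #2" turns it into 2 pages
 * for do_mmap2, whereas off_4k = 9 is not a whole number of pages
 * and gets -EINVAL.
 */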
sys_mmap2:
#if PAGE_SHIFT > 12
tst r5, #PGOFF_MASK
moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
beq do_mmap2
mov r0, #-EINVAL
RETINSTR(mov, pc, lr)
#else
str r5, [sp, #4]
b do_mmap2
#endif