Diffstat (limited to 'arch/sparc64/kernel/unaligned.c')
-rw-r--r--  arch/sparc64/kernel/unaligned.c  279
1 files changed, 68 insertions, 211 deletions
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 4372bf32ecf6f2..da9739f0d43723 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,169 +180,28 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
die_if_kernel(str, regs);
}
-#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %4, 0, %%asi\n\t" \
- "cmp %1, 8\n\t" \
- "bge,pn %%icc, 9f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 6f\n" \
-"4:\t" " lduba [%2] %%asi, %%l1\n" \
-"5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 8, %%l1\n\t" \
- "brz,pt %3, 3f\n\t" \
- " add %%l1, %%l2, %%l1\n\t" \
- "sllx %%l1, 48, %%l1\n\t" \
- "srax %%l1, 48, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sll %%l1, 24, %%l1\n" \
-"7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sll %%l2, 16, %%l2\n" \
-"8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sll %%g7, 8, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%g7, %%l1\n\t" \
- "brnz,a,pt %3, 3f\n\t" \
- " sra %%l1, 0, %%l1\n" \
-"3:\t" "ba,pt %%xcc, 0f\n\t" \
- " stx %%l1, [%0]\n" \
-"9:\t" "lduba [%2] %%asi, %%l1\n" \
-"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
- "sllx %%l1, 56, %%l1\n" \
-"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
- "sllx %%l2, 48, %%l2\n" \
-"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
- "sllx %%g7, 40, %%g7\n\t" \
- "sllx %%g1, 32, %%g1\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n" \
-"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
- "or %%l1, %%g7, %%g7\n" \
-"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
- "sllx %%l2, 24, %%l2\n" \
-"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
- "sllx %%g1, 16, %%g1\n\t" \
- "or %%g7, %%l2, %%g7\n" \
-"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
- "sllx %%l1, 8, %%l1\n\t" \
- "or %%g7, %%g1, %%g7\n\t" \
- "or %%l1, %%l2, %%l1\n\t" \
- "or %%g7, %%l1, %%g7\n\t" \
- "cmp %1, 8\n\t" \
- "be,a,pt %%icc, 0f\n\t" \
- " stx %%g7, [%0]\n\t" \
- "srlx %%g7, 32, %%l1\n\t" \
- "sra %%g7, 0, %%g7\n\t" \
- "stx %%l1, [%0]\n\t" \
- "stx %%g7, [%0 + 8]\n" \
-"0:\n\t" \
- "wr %%g0, %5, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
- "r" (asi), "i" (ASI_AIUS) \
- : "l1", "l2", "g7", "g1", "cc"); \
-})
+extern void do_int_load(unsigned long *dest_reg, int size,
+			unsigned long *saddr, int is_signed, int asi);
-#define store_common(dst_addr, size, src_val, asi, errh) ({ \
-__asm__ __volatile__ ( \
- "wr %3, 0, %%asi\n\t" \
- "ldx [%2], %%l1\n" \
- "cmp %1, 2\n\t" \
- "be,pn %%icc, 2f\n\t" \
- " cmp %1, 4\n\t" \
- "be,pt %%icc, 1f\n\t" \
- " srlx %%l1, 24, %%l2\n\t" \
- "srlx %%l1, 56, %%g1\n\t" \
- "srlx %%l1, 48, %%g7\n" \
-"4:\t" "stba %%g1, [%0] %%asi\n\t" \
- "srlx %%l1, 40, %%g1\n" \
-"5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \
- "srlx %%l1, 32, %%g7\n" \
-"6:\t" "stba %%g1, [%0 + 2] %%asi\n" \
-"7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \
- "srlx %%l1, 16, %%g1\n" \
-"8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \
- "srlx %%l1, 8, %%g7\n" \
-"9:\t" "stba %%g1, [%0 + 5] %%asi\n" \
-"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
-"1:\t" "srl %%l1, 16, %%g7\n" \
-"12:\t" "stba %%l2, [%0] %%asi\n\t" \
- "srl %%l1, 8, %%l2\n" \
-"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
-"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
- "ba,pt %%xcc, 0f\n" \
-"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
-"2:\t" "srl %%l1, 8, %%l2\n" \
-"16:\t" "stba %%l2, [%0] %%asi\n" \
-"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
-"0:\n\t" \
- "wr %%g0, %4, %%asi\n\n\t" \
- ".section __ex_table\n\t" \
- ".word 4b, " #errh "\n\t" \
- ".word 5b, " #errh "\n\t" \
- ".word 6b, " #errh "\n\t" \
- ".word 7b, " #errh "\n\t" \
- ".word 8b, " #errh "\n\t" \
- ".word 9b, " #errh "\n\t" \
- ".word 10b, " #errh "\n\t" \
- ".word 11b, " #errh "\n\t" \
- ".word 12b, " #errh "\n\t" \
- ".word 13b, " #errh "\n\t" \
- ".word 14b, " #errh "\n\t" \
- ".word 15b, " #errh "\n\t" \
- ".word 16b, " #errh "\n\t" \
- ".word 17b, " #errh "\n\n\t" \
- ".previous\n\t" \
- : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
- : "l1", "l2", "g7", "g1", "cc"); \
-})
-
-#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
- unsigned long zero = 0; \
- unsigned long *src_val = &zero; \
- \
- if (size == 16) { \
- size = 8; \
- zero = (((long)(reg_num ? \
- (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
- (unsigned)fetch_reg(reg_num + 1, regs); \
- } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
- store_common(dst_addr, size, src_val, asi, errh); \
-})
-
-extern void smp_capture(void);
-extern void smp_release(void);
-
-#define do_atomic(srcdest_reg, mem, errh) ({ \
- unsigned long flags, tmp; \
- \
- smp_capture(); \
- local_irq_save(flags); \
- tmp = *srcdest_reg; \
- do_integer_load(srcdest_reg, 4, mem, 0, errh); \
- store_common(mem, 4, &tmp, errh); \
- local_irq_restore(flags); \
- smp_release(); \
-})
+extern void __do_int_store(unsigned long *dst_addr, int size,
+			   unsigned long *src_val, int asi);
+
+static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
+				 struct pt_regs *regs, int asi)
+{
+	unsigned long zero = 0;
+	unsigned long *src_val = &zero;
+
+	if (size == 16) {
+		size = 8;
+		zero = (((long)(reg_num ?
+			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
+			(unsigned)fetch_reg(reg_num + 1, regs);
+	} else if (reg_num) {
+		src_val = fetch_reg_addr(reg_num, regs);
+	}
+	__do_int_store(dst_addr, size, src_val, asi);
+}
static inline void advance(struct pt_regs *regs)
{
@@ -364,24 +223,29 @@ static inline int ok_for_kernel(unsigned int insn)
return !floating_point_load_or_store_p(insn);
}
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
-
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+void kernel_mna_trap_fault(void)
{
- unsigned long g2 = regs->u_regs [UREG_G2];
+ struct pt_regs *regs = current_thread_info()->kern_una_regs;
+ unsigned int insn = current_thread_info()->kern_una_insn;
+ unsigned long g2 = regs->u_regs[UREG_G2];
unsigned long fixup = search_extables_range(regs->tpc, &g2);
if (!fixup) {
- unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long address;
+
+ address = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel NULL "
+ "pointer dereference in mna handler");
} else
- printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+ printk(KERN_ALERT "Unable to handle kernel paging "
+ "request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
- printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
- printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
+ printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
@@ -400,48 +264,41 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
+ current_thread_info()->kern_una_regs = regs;
+ current_thread_info()->kern_una_insn = insn;
+
if (!ok_for_kernel(insn) || dir == both) {
- printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
- regs->tpc);
- unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
-
- __asm__ __volatile__ ("\n"
-"kernel_unaligned_trap_fault:\n\t"
- "mov %0, %%o0\n\t"
- "call kernel_mna_trap_fault\n\t"
- " mov %1, %%o1\n\t"
- :
- : "r" (regs), "r" (insn)
- : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
- "g1", "g2", "g3", "g4", "g7", "cc");
+ printk("Unsupported unaligned load/store trap for kernel "
+ "at <%016lx>.\n", regs->tpc);
+ unaligned_panic("Kernel does fpu/atomic "
+ "unaligned load/store.", regs);
+
+ kernel_mna_trap_fault();
} else {
- unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+ unsigned long addr;
+ addr = compute_effective_address(regs, insn,
+ ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
- printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
- regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+ printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
+ "retpc[%016lx]\n",
+ regs->tpc, dirstrings[dir], addr, size,
+ regs->u_regs[UREG_RETPC]);
#endif
switch (dir) {
case load:
- do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
- size, (unsigned long *) addr,
- decode_signedness(insn), decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
+ do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn),
+ decode_asi(insn, regs));
break;
case store:
- do_integer_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs,
- decode_asi(insn, regs),
- kernel_unaligned_trap_fault);
- break;
-#if 0 /* unsupported */
- case both:
- do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
- (unsigned long *) addr,
- kernel_unaligned_trap_fault);
+ do_int_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ decode_asi(insn, regs));
break;
-#endif
+
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
@@ -492,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
-extern void data_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+					    unsigned long sfsr,
+					    unsigned long sfar);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
@@ -537,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
default:
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
@@ -557,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
@@ -574,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
@@ -677,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
@@ -721,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);