author     Geert Uytterhoeven <geert+renesas@glider.be>  2024-04-23 10:33:05 +0200
committer  Geert Uytterhoeven <geert+renesas@glider.be>  2024-04-23 10:33:05 +0200
commit     bdd010331d36901864ae6ce26d1f4a4000367db1 (patch)
tree       107022513afd0c46c510ced712094e4829c3faca
parent     499fa83eeeb4569a698dc9e1fa68d99e610d7b76 (diff)
parent     ba5ea59f768f67d127b319b26ba209ff67e0d9a5 (diff)
download   renesas-drivers-bdd010331d36901864ae6ce26d1f4a4000367db1.tar.gz
Merge remote-tracking branch 'riscv/for-next' into renesas-drivers
Notice: this object is not reachable from any branch.
-rw-r--r--  Documentation/rust/arch-support.rst                         1
-rw-r--r--  arch/riscv/Kconfig                                         18
-rw-r--r--  arch/riscv/Makefile                                        26
-rw-r--r--  arch/riscv/include/asm/atomic.h                           164
-rw-r--r--  arch/riscv/include/asm/cacheflush.h                         7
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h                          404
-rw-r--r--  arch/riscv/include/asm/signal.h                            12
-rw-r--r--  arch/riscv/include/asm/suspend.h                            1
-rw-r--r--  arch/riscv/kernel/suspend.c                                 3
-rw-r--r--  arch/riscv/kernel/sys_riscv.c                               1
-rw-r--r--  scripts/generate_rust_target.rs                             6
-rw-r--r--  tools/testing/selftests/syscall_user_dispatch/sud_test.c   14
12 files changed, 253 insertions, 404 deletions
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 5c4fa9f5d1cdb7..4d1495ded2aac7 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -17,6 +17,7 @@ Architecture Level of support Constraints
============= ================ ==============================================
``arm64`` Maintained Little Endian only.
``loongarch`` Maintained -
+``riscv`` Maintained ``riscv64`` only.
``um`` Maintained ``x86_64`` only.
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 9e87287942dca1..04072e86e2403a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -71,7 +71,7 @@ config RISCV
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
- select CLINT_TIMER if !MMU
+ select CLINT_TIMER if RISCV_M_MODE
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
@@ -155,6 +155,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
+ select HAVE_RUST if 64BIT
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
@@ -231,8 +232,12 @@ config ARCH_MMAP_RND_COMPAT_BITS_MAX
# set if we run in machine mode, cleared if we run in supervisor mode
config RISCV_M_MODE
- bool
- default !MMU
+ bool "Build a kernel that runs in machine mode"
+ depends on !MMU
+ default y
+ help
+ Select this option if you want to run the kernel in M-mode,
+ without the assistance of any other firmware.
# set if we are running in S-mode and can use SBI calls
config RISCV_SBI
@@ -249,8 +254,9 @@ config MMU
config PAGE_OFFSET
hex
- default 0xC0000000 if 32BIT && MMU
- default 0x80000000 if !MMU
+ default 0x80000000 if !MMU && RISCV_M_MODE
+ default 0x80200000 if !MMU
+ default 0xc0000000 if 32BIT
default 0xff60000000000000 if 64BIT
config KASAN_SHADOW_OFFSET
@@ -598,7 +604,6 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
depends on TOOLCHAIN_HAS_ZBB
- depends on MMU
depends on RISCV_ALTERNATIVE
default y
help
@@ -630,7 +635,6 @@ config RISCV_ISA_ZICBOM
config RISCV_ISA_ZICBOZ
bool "Zicboz extension support for faster zeroing of memory"
- depends on MMU
depends on RISCV_ALTERNATIVE
default y
help
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 5b3115a1985226..8b2ae27f1d98d6 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -34,6 +34,9 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_AFLAGS += -mabi=lp64
KBUILD_LDFLAGS += -melf64lriscv
+
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv64 --target=riscv64imac-unknown-none-elf \
+ -Cno-redzone
else
BITS := 32
UTS_MACHINE := riscv32
@@ -68,6 +71,10 @@ riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
riscv-march-$(CONFIG_RISCV_ISA_V) := $(riscv-march-y)v
+ifneq ($(CONFIG_RISCV_ISA_C),y)
+ KBUILD_RUSTFLAGS += -Ctarget-feature=-c
+endif
+
ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
KBUILD_CFLAGS += -Wa,-misa-spec=2.2
KBUILD_AFLAGS += -Wa,-misa-spec=2.2
@@ -133,7 +140,15 @@ boot := arch/riscv/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := $(boot)/xipImage
else
+ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
+KBUILD_IMAGE := $(boot)/loader.bin
+else
+ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE := $(boot)/Image.gz
+else
+KBUILD_IMAGE := $(boot)/vmlinuz.efi
+endif
+endif
endif
libs-y += arch/riscv/lib/
@@ -153,17 +168,6 @@ endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
-ifneq ($(CONFIG_XIP_KERNEL),y)
-ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
-KBUILD_IMAGE := $(boot)/loader.bin
-else
-ifeq ($(CONFIG_EFI_ZBOOT),)
-KBUILD_IMAGE := $(boot)/Image.gz
-else
-KBUILD_IMAGE := $(boot)/vmlinuz.efi
-endif
-endif
-endif
BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE))
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0e0522e588ca6e..5b96c2f61adb59 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -195,22 +195,28 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
+#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " beq %[p], %[u], 1f\n" \
+ " add %[rc], %[p], %[a]\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : [a]"r" (_a), [u]"r" (_u) \
+ : "memory"); \
+})
+
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
+
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
@@ -221,77 +227,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " beq %[p], %[u], 1f\n"
- " add %[rc], %[p], %[a]\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- : [a]"r" (a), [u]"r" (u)
- : "memory");
+ _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
+
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
+#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bltz %[p], 1f\n" \
+ " addi %[rc], %[p], 1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
+
return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " bgtz %[p], 1f\n" \
+ " addi %[rc], %[p], -1\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
+
return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
+({ \
+ __asm__ __volatile__ ( \
+ "0: lr." sfx " %[p], %[c]\n" \
+ " addi %[rc], %[p], -1\n" \
+ " bltz %[rc], 1f\n" \
+ " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
+ " bnez %[rc], 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
+ : \
+ : "memory"); \
+})
+
static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
int prev, rc;
- __asm__ __volatile__ (
- "0: lr.w %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.w.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
+
return prev - 1;
}
@@ -303,17 +318,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bltz %[p], 1f\n"
- " addi %[rc], %[p], 1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
+
return !(prev < 0);
}
@@ -324,17 +330,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " bgtz %[p], 1f\n"
- " addi %[rc], %[p], -1\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
+
return !(prev > 0);
}
@@ -345,17 +342,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
s64 prev;
long rc;
- __asm__ __volatile__ (
- "0: lr.d %[p], %[c]\n"
- " addi %[rc], %[p], -1\n"
- " bltz %[rc], 1f\n"
- " sc.d.rl %[rc], %[rc], %[c]\n"
- " bnez %[rc], 0b\n"
- RISCV_FULL_BARRIER
- "1:\n"
- : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
- :
- : "memory");
+ _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
+
return prev - 1;
}
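
[Editor's note] The atomic.h refactoring above collapses six near-identical lr/sc loops into four macros parameterized by the width suffix ("w" or "d"); the open-coded " fence rw, rw\n" now stands in for RISCV_FULL_BARRIER on the success path, with unchanged semantics. For readers who want the contract without the asm, here is a hedged user-space analogue of arch_atomic_fetch_add_unless() built on the GCC/Clang atomic builtins — a sketch, not the kernel implementation:

#include <stdbool.h>

/*
 * Sketch of the arch_atomic_fetch_add_unless() contract: add @a to
 * *v unless *v == @u, returning the old value. The kernel emits an
 * lr.w/sc.w.rl loop plus "fence rw, rw" on success; __ATOMIC_SEQ_CST
 * on the success path approximates that full barrier here.
 */
static inline int fetch_add_unless(int *v, int a, int u)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	while (old != u) {
		/* On failure, @old is reloaded with the current value. */
		if (__atomic_compare_exchange_n(v, &old, old + a,
						/* weak */ true,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			break;
	}
	return old;
}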
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index a129dac4521d35..dd8d07146116c8 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -33,8 +33,11 @@ static inline void flush_dcache_page(struct page *page)
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_page(vma, pg, addr, len) \
- flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+do { \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_mm(vma->vm_mm, 0); \
+} while (0)
#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
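
[Editor's note] Two things changed in flush_icache_user_page(): the flush is now skipped for non-executable mappings, and the multi-statement body is wrapped in do { } while (0) so the embedded if cannot capture a caller's else. A minimal compile-only sketch of why that wrapper matters — the stub types here are hypothetical stand-ins, not the kernel definitions:

#define VM_EXEC 0x00000004UL

struct mm_struct { int stub; };
struct vm_area_struct {
	unsigned long vm_flags;
	struct mm_struct *vm_mm;
};

static void flush_icache_mm(struct mm_struct *mm, int local)
{
	(void)mm; (void)local;	/* stub */
}

#define flush_icache_user_page(vma, pg, addr, len)	\
do {							\
	if ((vma)->vm_flags & VM_EXEC)			\
		flush_icache_mm((vma)->vm_mm, 0);	\
} while (0)

static void caller(struct vm_area_struct *vma, int cond)
{
	if (cond)
		flush_icache_user_page(vma, 0, 0, 0);	/* safe before else */
	else
		(void)0;	/* binds to the outer if, as intended */
}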
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2fee65cc844324..4d23f0c35b9497 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,140 +10,79 @@
#include <asm/fence.h>
-#define __xchg_relaxed(ptr, new, size) \
+#define __arch_xchg_masked(prepend, append, r, p, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z4\n" \
+ " or %1, %1, %z3\n" \
+ " sc.w %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" (__newx), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_xchg(sfx, prepend, append, r, p, n) \
+({ \
+ __asm__ __volatile__ ( \
+ prepend \
+ " amoswap" sfx " %0, %2, %1\n" \
+ append \
+ : "=r" (r), "+A" (*(p)) \
+ : "r" (n) \
+ : "memory"); \
+})
+
+#define _arch_xchg(ptr, new, sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_xchg_masked(prepend, append, \
+ __ret, __ptr, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".w" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
+ __arch_xchg(".d" sfx, prepend, append, \
+ __ret, __ptr, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
#define arch_xchg_relaxed(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_acquire(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d %0, %2, %1\n" \
- RISCV_ACQUIRE_BARRIER \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", "")
#define arch_xchg_acquire(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_acquire((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __xchg_release(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.w %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- " amoswap.d %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
#define arch_xchg_release(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_release((ptr), \
- _x_, sizeof(*(ptr))); \
-})
-
-#define __arch_xchg(ptr, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(new) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- " amoswap.w.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- " amoswap.d.aqrl %0, %2, %1\n" \
- : "=r" (__ret), "+A" (*__ptr) \
- : "r" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
#define arch_xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
-})
+ _arch_xchg(ptr, x, ".aqrl", "", "")
#define xchg32(ptr, x) \
({ \
@@ -162,190 +101,95 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
-#define __cmpxchg_relaxed(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
-#define arch_cmpxchg_relaxed(ptr, o, n) \
+#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \
+({ \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __oldx = (ulong)(o) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z5\n" \
+ " bne %1, %z3, 1f\n" \
+ " and %1, %0, %z6\n" \
+ " or %1, %1, %z4\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" ((long)__oldx), "rJ" (__newx), \
+ "rJ" (__mask), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+})
+
+#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \
({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg_acquire(ptr, old, new, size) \
+ register unsigned int __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr" lr_sfx " %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ append \
+ "1:\n" \
+ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
+ : "rJ" (co o), "rJ" (n) \
+ : "memory"); \
+})
+
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
+ __typeof__(*(__ptr)) __old = (old); \
+ __typeof__(*(__ptr)) __new = (new); \
+ __typeof__(*(__ptr)) __ret; \
+ \
+ switch (sizeof(*__ptr)) { \
+ case 1: \
+ case 2: \
+ __arch_cmpxchg_masked(sc_sfx, prepend, append, \
+ __ret, __ptr, __old, __new); \
+ break; \
case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
+ __ret, __ptr, (long), __old, __new); \
break; \
case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_ACQUIRE_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
+ __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
+ __ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
} \
- __ret; \
+ (__typeof__(*(__ptr)))__ret; \
})
-#define arch_cmpxchg_acquire(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", "")
-#define __cmpxchg_release(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- RISCV_RELEASE_BARRIER \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+#define arch_cmpxchg_acquire(ptr, o, n) \
+ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
#define arch_cmpxchg_release(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
-
-#define __cmpxchg(ptr, old, new, size) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- register unsigned int __rc; \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0: lr.w %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.w.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_FULL_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" ((long)__old), "rJ" (__new) \
- : "memory"); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0: lr.d %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc.d.rl %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- RISCV_FULL_BARRIER \
- "1:\n" \
- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
- : "rJ" (__old), "rJ" (__new) \
- : "memory"); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __ret; \
-})
+ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
#define arch_cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), \
- _o_, _n_, sizeof(*(ptr))); \
-})
+ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
#define arch_cmpxchg_local(ptr, o, n) \
- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+ arch_cmpxchg_relaxed((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) \
({ \
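
[Editor's note] Besides deduplicating the ordering variants, the rework adds sub-word support: __arch_cmpxchg_masked() gives 1- and 2-byte cmpxchg on top of 32-bit lr.w/sc.w by working on the aligned word containing the value — the old and new bytes are shifted into their lane, only that lane is compared, and the remaining bytes are spliced back unchanged. A user-space sketch of the same arithmetic, assuming little-endian byte order and using compiler builtins instead of lr/sc:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the byte-in-word arithmetic behind __arch_cmpxchg_masked(),
 * for a u8 at @p (little-endian assumed, as on riscv). Illustration
 * with compiler builtins, not the kernel's lr/sc loop.
 */
static uint8_t cmpxchg_u8_via_u32(uint8_t *p, uint8_t old_v, uint8_t new_v)
{
	uint32_t *word = (uint32_t *)((uintptr_t)p & ~0x3UL);
	unsigned int shift = ((uintptr_t)p & 0x3UL) * 8;
	uint32_t mask = 0xffU << shift;
	uint32_t old_x = (uint32_t)old_v << shift;
	uint32_t new_x = (uint32_t)new_v << shift;
	uint32_t cur = __atomic_load_n(word, __ATOMIC_RELAXED);

	/* Retry while our lane still matches @old_v, as lr/sc would. */
	while ((cur & mask) == old_x) {
		uint32_t next = (cur & ~mask) | new_x;

		if (__atomic_compare_exchange_n(word, &cur, next,
						/* weak */ true,
						__ATOMIC_SEQ_CST,
						__ATOMIC_RELAXED))
			break;
	}
	return (uint8_t)((cur & mask) >> shift);
}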
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
deleted file mode 100644
index 956ae0a01bad11..00000000000000
--- a/arch/riscv/include/asm/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ASM_SIGNAL_H
-#define __ASM_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-#include <uapi/asm/ptrace.h>
-
-asmlinkage __visible
-void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
-
-#endif
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 4718096fa5e3fc..4ffb022b097f42 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -13,7 +13,6 @@ struct suspend_context {
/* Saved and restored by low-level functions */
struct pt_regs regs;
/* Saved and restored by high-level functions */
- unsigned long scratch;
unsigned long envcfg;
unsigned long tvec;
unsigned long ie;
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 8a327b485b90e7..c8cec0cc5833c4 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -14,7 +14,6 @@
void suspend_save_csrs(struct suspend_context *context)
{
- context->scratch = csr_read(CSR_SCRATCH);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
context->envcfg = csr_read(CSR_ENVCFG);
context->tvec = csr_read(CSR_TVEC);
@@ -37,7 +36,7 @@ void suspend_save_csrs(struct suspend_context *context)
void suspend_restore_csrs(struct suspend_context *context)
{
- csr_write(CSR_SCRATCH, context->scratch);
+ csr_write(CSR_SCRATCH, 0);
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
csr_write(CSR_ENVCFG, context->envcfg);
csr_write(CSR_TVEC, context->tvec);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f1c1416a9f1e51..64155323cc923a 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -7,7 +7,6 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
-#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 54919cf48621e8..8f7846b9029a46 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -150,6 +150,12 @@ fn main() {
// `llvm-target`s are taken from `scripts/Makefile.clang`.
if cfg.has("ARM64") {
panic!("arm64 uses the builtin rustc aarch64-unknown-none target");
+ } else if cfg.has("RISCV") {
+ if cfg.has("64BIT") {
+ panic!("64-bit RISC-V uses the builtin rustc riscv64-unknown-none-elf target");
+ } else {
+ panic!("32-bit RISC-V is an unsupported architecture");
+ }
} else if cfg.has("X86_64") {
ts.push("arch", "x86_64");
ts.push(
diff --git a/tools/testing/selftests/syscall_user_dispatch/sud_test.c b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
index b5d592d4099e85..d975a67673299f 100644
--- a/tools/testing/selftests/syscall_user_dispatch/sud_test.c
+++ b/tools/testing/selftests/syscall_user_dispatch/sud_test.c
@@ -158,6 +158,20 @@ static void handle_sigsys(int sig, siginfo_t *info, void *ucontext)
/* In preparation for sigreturn. */
SYSCALL_DISPATCH_OFF(glob_sel);
+
+ /*
+ * The tests for argument handling assume that `syscall(x) == x`. This
+ * is a NOP on x86 because the syscall number is passed in %rax, which
+ * happens to also be the function ABI return register. Other
+ * architectures may need to swizzle the arguments around.
+ */
+#if defined(__riscv)
+/* REG_A7 is not defined in libc headers */
+# define REG_A7 (REG_A0 + 7)
+
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A0] =
+ ((ucontext_t *)ucontext)->uc_mcontext.__gregs[REG_A7];
+#endif
}
TEST(dispatch_and_return)