author	Will Deacon <will@kernel.org>	2019-10-01 11:43:13 +0100
committer	Will Deacon <will@kernel.org>	2019-10-04 11:15:42 +0100
commit	a48e61de758c6b45f080fabc6fed3f4ed42598dc (patch)
tree	3fa60eb371d9149121d037f26bc5354c886aa701
parent	a2b99dcac36c332d4a49184716fc2a67dc1bdbb1 (diff)
arm64: Mark functions using explicit register variables as '__always_inline'
As of ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING forcibly"), inline functions are no longer annotated with '__always_inline', which allows the compiler to decide whether inlining is really a good idea or not.

Although this is a great idea on paper, the reality is that AArch64 GCC prior to 9.1 has been shown to get confused when creating an out-of-line copy of a function passing explicit 'register' variables into an inline assembly block:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91111

It's not clear whether this is specific to arm64 or not but, for now, ensure that all of our functions using 'register' variables are marked as '__always_inline' so that the old behaviour is effectively preserved. Hopefully other architectures are luckier with their compilers.

Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
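For context, below is a minimal standalone sketch of the pattern the patch hardens. The helper name 'cmpxchg64_sketch' and its body are illustrative only and are not taken from atomic_lse.h; the sketch simply shows arguments being bound to explicit 'register' variables ahead of an inline assembly block, which is the combination that the affected GCC releases can mishandle when they emit an out-of-line copy. Building it assumes an AArch64 compiler with LSE atomics enabled (e.g. -march=armv8.1-a).

    /* Illustrative only: a hypothetical helper, not code from the kernel tree. */
    #include <stdint.h>

    #define __always_inline inline __attribute__((__always_inline__))

    static __always_inline uint64_t cmpxchg64_sketch(volatile uint64_t *ptr,
    						 uint64_t old, uint64_t new)
    {
    	/*
    	 * Bind the operands to specific registers before the asm block.
    	 * With plain 'inline', GCC before 9.1 may generate an out-of-line
    	 * copy and lose track of these bindings; '__always_inline' keeps
    	 * the pre-ac7c3e4ff401 behaviour of always inlining the body.
    	 */
    	register uint64_t x0 __asm__("x0") = old;
    	register uint64_t x1 __asm__("x1") = new;

    	__asm__ volatile(
    	"	casal	%[old], %[new], %[v]\n"
    	: [old] "+r" (x0), [v] "+Q" (*ptr)
    	: [new] "r" (x1)
    	: "memory");

    	return x0;	/* value observed in *ptr before the compare-and-swap */
    }

The kernel's __CMPXCHG_CASE and __CMPXCHG_DBL macros generate families of helpers built around this pattern; the diff below only changes their annotation from 'inline' to '__always_inline', leaving the register bindings and asm templates untouched.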
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index c6bd87d2915b4d..574808b9df4c89 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
}
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
+static __always_inline u##sz \
+__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
{ \
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, cl...) \
-static inline long __lse__cmpxchg_double##name(unsigned long old1, \
+static __always_inline long \
+__lse__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
unsigned long new2, \