Diffstat (limited to 'arch/arm64/include/asm/atomic_lse.h')
 arch/arm64/include/asm/atomic_lse.h | 124 +++++++++++++++++++-----------
 1 file changed, 81 insertions(+), 43 deletions(-)
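The bulk of this patch is mechanical: every LSE asm block gains a __LSE_PREAMBLE in front of the ARM64_LSE_ATOMIC_INSN() alternative, so that an assembler not driven with LSE support enabled (notably LLVM's integrated assembler) still accepts the ARMv8.1 atomic mnemonics. The preamble itself comes from asm/lse.h, outside this diff; below is a minimal userspace sketch of the idea, assuming the preamble is an ".arch armv8-a+lse" directive, with a hypothetical lse_add() standing in for the kernel macros.

#include <stdio.h>

/*
 * Minimal sketch, not the kernel macro: a per-asm-block ".arch"
 * preamble lets the assembler accept the LSE STADD mnemonic even if
 * the compile was not run with -march=armv8-a+lse. Assumes the CPU
 * actually implements LSE; there is no runtime detection here.
 */
static inline void lse_add(int i, int *counter)
{
	asm volatile(
	"	.arch	armv8-a+lse\n"		/* the assumed preamble */
	"	stadd	%w[i], %[v]\n"		/* atomic add, no return value */
	: [v] "+Q" (*counter)
	: [i] "r" (i));
}

int main(void)
{
	int v = 40;

	lse_add(2, &v);
	printf("%d\n", v);	/* 42 */
	return 0;
}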
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index f9b0b09153e0..393d73797e73 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
" " #asm_op " %w[i], %[v]\n") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
@@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_##op##name), \
/* LSE atomics */ \
@@ -84,7 +88,9 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(add_return##name) \
__nops(1), \
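Each of these sites expands ARM64_LSE_ATOMIC_INSN(llsc, lse) through the kernel's boot-time alternatives machinery: the default text branches to an out-of-line LL/SC routine and is patched to the inline LSE instruction when the CPU advertises ARM64_HAS_LSE_ATOMICS; the __nops(n) padding keeps both alternatives the same length. For contrast, here is a hedged userspace sketch, under a hypothetical name, of what the LL/SC fallback amounts to (the kernel's real version lives in atomic_ll_sc.h).

/*
 * Illustrative LL/SC retry loop in the shape of the kernel's
 * full-barrier add_return fallback: load-exclusive, modify,
 * store-exclusive, retry on failure.
 */
static inline int llsc_add_return(int i, int *counter)
{
	int result;
	int tmp;

	asm volatile(
	"1:	ldxr	%w[result], %[v]\n"		/* load-exclusive */
	"	add	%w[result], %w[result], %w[i]\n"
	"	stlxr	%w[tmp], %w[result], %[v]\n"	/* store-exclusive */
	"	cbnz	%w[tmp], 1b\n"			/* lost exclusivity: retry */
	"	dmb	ish"				/* full-barrier variant */
	: [result] "=&r" (result), [tmp] "=&r" (tmp), [v] "+Q" (*counter)
	: [i] "r" (i)
	: "memory");

	return result;
}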
@@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v)
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(and)
__nops(1),
@@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_and##name) \
__nops(1), \
@@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v)
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(sub)
__nops(1),
@@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(sub_return##name) \
__nops(2), \
@@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_sub##name) \
__nops(1), \
@@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
@@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_##op##name), \
/* LSE atomics */ \
@@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(add_return##name) \
__nops(1), \
@@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v)
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(and)
__nops(1),
@@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_and##name) \
__nops(1), \
@@ -351,7 +377,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(sub)
__nops(1),
@@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(sub_return##name) \
__nops(2), \
@@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_sub##name) \
__nops(1), \
@@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(dec_if_positive)
__nops(6),
@@ -446,22 +480,24 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+ u##sz new) \
{ \
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
register unsigned long x1 asm ("x1") = old; \
- register unsigned long x2 asm ("x2") = new; \
+ register u##sz x2 asm ("x2") = new; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
- __LL_SC_CMPXCHG(name) \
+ __LL_SC_CMPXCHG(name##sz) \
__nops(2), \
/* LSE atomics */ \
" mov " #w "30, %" #w "[old]\n" \
- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
+ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
@@ -470,22 +506,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
return x0; \
}
-__CMPXCHG_CASE(w, b, 1, )
-__CMPXCHG_CASE(w, h, 2, )
-__CMPXCHG_CASE(w, , 4, )
-__CMPXCHG_CASE(x, , 8, )
-__CMPXCHG_CASE(w, b, acq_1, a, "memory")
-__CMPXCHG_CASE(w, h, acq_2, a, "memory")
-__CMPXCHG_CASE(w, , acq_4, a, "memory")
-__CMPXCHG_CASE(x, , acq_8, a, "memory")
-__CMPXCHG_CASE(w, b, rel_1, l, "memory")
-__CMPXCHG_CASE(w, h, rel_2, l, "memory")
-__CMPXCHG_CASE(w, , rel_4, l, "memory")
-__CMPXCHG_CASE(x, , rel_8, l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
-__CMPXCHG_CASE(w, , mb_4, al, "memory")
-__CMPXCHG_CASE(x, , mb_8, al, "memory")
+__CMPXCHG_CASE(w, b, , 8, )
+__CMPXCHG_CASE(w, h, , 16, )
+__CMPXCHG_CASE(w, , , 32, )
+__CMPXCHG_CASE(x, , , 64, )
+__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
+__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
+__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
+__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
+__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
+__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
+__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
+__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
+__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
+__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
+__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
+__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
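Read the new instantiation table column-wise: w/x picks 32- vs 64-bit register names, sfx (b, h, empty) is the mnemonic's size suffix, the name prefix and mb infix pair up as none/relaxed, acq_/a, rel_/l and mb_/al, and sz is the bit width now baked into the function name. A short exercise of the hypothetical cmpxchg_case_mb_32() sketch above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 40;

	/* v holds the expected 40, so the CAS succeeds and stores 42. */
	if (cmpxchg_case_mb_32(&v, 40, 42) == 40)
		printf("v = %u\n", v);		/* v = 42 */

	/* v is now 42, not 40: the CAS fails and returns the observed 42. */
	if (cmpxchg_case_mb_32(&v, 40, 7) != 40)
		printf("v = %u\n", v);		/* still 42 */

	return 0;
}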
@@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
register unsigned long x3 asm ("x3") = new2; \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG_DBL(name) \
__nops(3), \
@@ -517,7 +555,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]") \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
- [v] "+Q" (*(unsigned long *)ptr) \
+ [v] "+Q" (*(__uint128_t *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
: __LL_SC_CLOBBERS, ##cl); \
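The final hunk is a correctness fix independent of the preamble churn: CASP writes back a 16-byte pair, but the old "+Q" output only told the compiler that 8 bytes at ptr could change, leaving it free to cache the second word across the asm. Widening the lvalue to __uint128_t covers both words. A hedged userspace sketch of the fixed shape, under a hypothetical name, assuming ptr is 16-byte aligned as cmpxchg_double requires:

/*
 * Illustrative only: returns 0 on success, non-zero on failure,
 * matching the kernel's convention. CASPAL needs even/odd register
 * pairs (x0/x1 and x2/x3), hence the pinned register variables.
 */
static inline long cmpxchg_double_mb(unsigned long old1, unsigned long old2,
				     unsigned long new1, unsigned long new2,
				     volatile void *ptr)
{
	unsigned long oldval1 = old1;
	unsigned long oldval2 = old2;
	register unsigned long x0 asm ("x0") = old1;
	register unsigned long x1 asm ("x1") = old2;
	register unsigned long x2 asm ("x2") = new1;
	register unsigned long x3 asm ("x3") = new2;

	asm volatile(
	"	.arch	armv8-a+lse\n"
	"	caspal	x0, x1, x2, x3, %[v]\n"
	"	eor	%[old1], %[old1], %[oldval1]\n"	/* 0 iff word 0 matched */
	"	eor	%[old2], %[old2], %[oldval2]\n"	/* 0 iff word 1 matched */
	"	orr	%[old1], %[old1], %[old2]"	/* fold into one flag */
	: [old1] "+&r" (x0), [old2] "+&r" (x1),
	  [v] "+Q" (*(__uint128_t *)ptr)	/* the full 16 bytes */
	: [new1] "r" (x2), [new2] "r" (x3),
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)
	: "memory");

	return x0;
}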