Diffstat (limited to 'arch/parisc/include')
-rw-r--r--  arch/parisc/include/asm/atomic.h    2
-rw-r--r--  arch/parisc/include/asm/barrier.h  61
-rw-r--r--  arch/parisc/include/asm/cmpxchg.h   2
3 files changed, 65 insertions, 0 deletions
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index bc54addd589f..614bcc7673f5 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -258,6 +258,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
+#define atomic64_set_release(v, i) atomic64_set((v), (i))
+
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
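
Note on the atomic.h hunk above: atomic64_set_release() is mapped straight to atomic64_set(), presumably so that release-style sets go through the same per-variable spinlock as the rest of the spinlock-based atomic64 operations (the _atomic_spin_unlock_irqrestore() call visible in the context) rather than the generic plain-store path, which could race with a concurrent locked read-modify-write. A minimal usage sketch; the structure and function names below are illustrative and not part of this patch:

#include <linux/atomic.h>
#include <linux/string.h>

/* Illustrative only: publish a payload, then a 64-bit sequence number. */
struct example_ring {
	char		buf[64];
	atomic64_t	seq;
};

static void example_publish(struct example_ring *r, const char *msg, s64 next)
{
	strscpy(r->buf, msg, sizeof(r->buf));	/* payload written first */
	atomic64_set_release(&r->seq, next);	/* readers that observe "next"
						 * also observe the payload */
}
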
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
index dbaaca84f27f..640d46edf32e 100644
--- a/arch/parisc/include/asm/barrier.h
+++ b/arch/parisc/include/asm/barrier.h
@@ -26,6 +26,67 @@
#define __smp_rmb() mb()
#define __smp_wmb() mb()
+#define __smp_store_release(p, v) \
+do { \
+ typeof(p) __p = (p); \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("stb,ma %0,0(%1)" \
+ : : "r"(*(__u8 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("sth,ma %0,0(%1)" \
+ : : "r"(*(__u16 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("stw,ma %0,0(%1)" \
+ : : "r"(*(__u32 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("std,ma %0,0(%1)" \
+ : : "r"(*(__u64 *)__u.__c), "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ typeof(p) __p = (p); \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile("ldb,ma 0(%1),%0" \
+ : "=r"(*(__u8 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile("ldh,ma 0(%1),%0" \
+ : "=r"(*(__u16 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile("ldw,ma 0(%1),%0" \
+ : "=r"(*(__u32 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ case 8: \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ asm volatile("ldd,ma 0(%1),%0" \
+ : "=r"(*(__u64 *)__u.__c) : "r"(__p) \
+ : "memory"); \
+ break; \
+ } \
+ __u.__val; \
+})
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
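
The barrier.h hunk gives parisc native __smp_store_release()/__smp_load_acquire(), which asm-generic/barrier.h (included just above the #endif) then uses to build smp_store_release() and smp_load_acquire(). The ",ma" completer with a zero displacement leaves the address unchanged and appears to be used here to get ordered stores and loads on PA-RISC 2.0. A minimal message-passing sketch of the intended pairing; the variables and functions are illustrative, not from this patch:

#include <asm/barrier.h>

/* Illustrative producer/consumer pair, not part of this patch. */
static int example_payload;
static int example_ready;

static void example_produce(int v)
{
	example_payload = v;			/* plain store of the data */
	smp_store_release(&example_ready, 1);	/* data ordered before "ready" */
}

static int example_consume(void)
{
	if (smp_load_acquire(&example_ready))	/* pairs with the release above */
		return example_payload;		/* guaranteed to observe v */
	return -1;
}
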
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index ab5c215cf46c..068958575871 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
+ case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
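
With __cmpxchg_u8() wired into the size switch, cmpxchg() on one-byte objects no longer falls through to __cmpxchg_called_with_bad_pointer() and becomes usable on parisc. A minimal sketch of a byte-wide claim flag; the names are illustrative and not part of this patch:

#include <linux/atomic.h>
#include <linux/types.h>

static u8 example_state;	/* 0 = idle, 1 = busy; illustrative only */

static bool example_try_claim(void)
{
	/* sizeof(*ptr) == 1, so this now resolves to __cmpxchg_u8(). */
	return cmpxchg(&example_state, 0, 1) == 0;
}
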