Diffstat (limited to 'tools/include/linux')
 tools/include/linux/bitmap.h      |  1 +
 tools/include/linux/bitops.h      |  7 ++-----
 tools/include/linux/bits.h        | 26 ++++++++++++++
 tools/include/linux/err.h         | 20 ++++++++++
 tools/include/linux/kernel.h      |  3 ++-
 tools/include/linux/ring_buffer.h | 73 ++++++++++++++++++++++
 tools/include/linux/spinlock.h    | 12 ++++++++++--
 7 files changed, 134 insertions(+), 8 deletions(-)
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index e63662db131b..05dca5c203f3 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -15,6 +15,7 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
+void bitmap_clear(unsigned long *map, unsigned int start, int len);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
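The new bitmap_clear() declaration gives tools code the kernel's clear-a-run-of-bits helper. A minimal usage sketch (illustrative, not part of the commit; assumes the tools bitmap implementation and a hosted libc):

#include <string.h>
#include <linux/bitmap.h>

static void clear_run_example(void)
{
	unsigned long map[BITS_TO_LONGS(64)];

	memset(map, 0xff, sizeof(map));	/* start with all bits set */
	bitmap_clear(map, 4, 6);	/* clear bits 4..9 inclusive */
}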
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index acc704bd3998..0b0ef3abc966 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -3,8 +3,6 @@
#define _TOOLS_LINUX_BITOPS_H_
#include <asm/types.h>
-#include <linux/compiler.h>
-
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif
@@ -12,10 +10,9 @@
#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
+#include <linux/bits.h>
+#include <linux/compiler.h>
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/tools/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
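For reference, a quick self-check of the new mask macros (illustrative, not part of the commit; assumes a hosted build with assertions enabled):

#include <assert.h>
#include <linux/bits.h>

static void bits_selftest(void)
{
	assert(BIT(3) == 0x8UL);		/* single bit */
	assert(GENMASK(3, 0) == 0xfUL);		/* bits 0..3 */
	assert(GENMASK(7, 4) == 0xf0UL);	/* bits 4..7 */
	assert(GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL);
}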
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 7a8b61ad44cb..2f5a12b88a86 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -52,4 +52,24 @@ static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
+{
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ else
+ return 0;
+}
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void * __must_check ERR_CAST(__force const void *ptr)
+{
+ /* cast away the const */
+ return (void *) ptr;
+}
#endif /* _LINUX_ERR_H */
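A usage sketch for the two new helpers (struct foo, struct bar and open_foo() are made-up names, not from the commit): PTR_ERR_OR_ZERO() collapses an ERR_PTR-style return into an errno-style int, and ERR_CAST() forwards an error pointer across pointer types without an open-coded cast.

#include <linux/err.h>

static int setup_foo(void)
{
	struct foo *f = open_foo();	/* a pointer, or ERR_PTR(-errno) */

	return PTR_ERR_OR_ZERO(f);	/* 0 on success, negative errno on error */
}

static struct bar *foo_as_bar(struct foo *f)
{
	if (IS_ERR(f))
		return ERR_CAST(f);	/* same error value, new pointer type */
	return (struct bar *)f;
}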
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 0ad884452c5c..857d9e22826e 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -70,6 +70,7 @@
#define BUG_ON(cond) assert(!(cond))
#endif
#endif
+#define BUG() BUG_ON(1)
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le16 bswap_16
@@ -115,6 +116,6 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define current_gfp_context(k) 0
-#define synchronize_sched()
+#define synchronize_rcu()
#endif
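In tools builds BUG() now expands to BUG_ON(1), i.e. assert(!(1)), so reaching it aborts the process when assertions are enabled. A hypothetical use (xmalloc() is illustrative, not from the commit):

#include <stdlib.h>
#include <linux/kernel.h>

static void *xmalloc(size_t size)
{
	void *p = malloc(size);

	if (!p)
		BUG();	/* aborts via assert() in the tools build */
	return p;
}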
diff --git a/tools/include/linux/ring_buffer.h b/tools/include/linux/ring_buffer.h
new file mode 100644
index 000000000000..9a083ae60473
--- /dev/null
+++ b/tools/include/linux/ring_buffer.h
@@ -0,0 +1,73 @@
+#ifndef _TOOLS_LINUX_RING_BUFFER_H_
+#define _TOOLS_LINUX_RING_BUFFER_H_
+
+#include <asm/barrier.h>
+
+/*
+ * Contract with kernel for walking the perf ring buffer from
+ * user space requires the following barrier pairing (quote
+ * from kernel/events/ring_buffer.c):
+ *
+ * Since the mmap() consumer (userspace) can run on a
+ * different CPU:
+ *
+ *   kernel                             user
+ *
+ *   if (LOAD ->data_tail) {            LOAD ->data_head
+ *                   (A)                smp_rmb()        (C)
+ *      STORE $data                     LOAD $data
+ *      smp_wmb()    (B)                smp_mb()         (D)
+ *      STORE ->data_head               STORE ->data_tail
+ *   }
+ *
+ * Where A pairs with D, and B pairs with C.
+ *
+ * In our case A is a control dependency that separates the
+ * load of ->data_tail from the stores of $data: if ->data_tail
+ * indicates there is no room in the buffer to store $data, we
+ * do not store it.
+ *
+ * D needs to be a full barrier since it separates the data
+ * READ from the tail WRITE.
+ *
+ * For B a WMB is sufficient since it separates two WRITEs,
+ * and for C an RMB is sufficient since it separates two READs.
+ *
+ * Note, instead of B, C, D we could also use smp_store_release()
+ * in B and D as well as smp_load_acquire() in C.
+ *
+ * However, this optimization does not make sense for all
+ * kernel-supported architectures, since for a fair number of
+ * them smp_load_acquire() would resolve into a READ_ONCE() +
+ * smp_mb() pair, and smp_store_release() into an smp_mb() +
+ * WRITE_ONCE() pair.
+ *
+ * For those architectures, smp_wmb() in B and smp_rmb() in C
+ * are still less expensive. For D the cost is the same or
+ * lower; for example, thanks to TSO, x86 can avoid the CPU
+ * barrier entirely.
+ */
+
+static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
+{
+/*
+ * Architectures where smp_load_acquire() does not fall back to
+ * a READ_ONCE() + smp_mb() pair.
+ */
+#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__ia64__) || defined(__sparc__) && defined(__arch64__)
+ return smp_load_acquire(&base->data_head);
+#else
+ u64 head = READ_ONCE(base->data_head);
+
+ smp_rmb();
+ return head;
+#endif
+}
+
+static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base,
+ u64 tail)
+{
+ smp_store_release(&base->data_tail, tail);
+}
+
+#endif /* _TOOLS_LINUX_RING_BUFFER_H_ */
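A hypothetical consumer loop (not part of this commit) showing where the helpers sit in the scheme above: the head read is barrier C, the tail write is barrier D. Here base points at the mmap'ed perf_event_mmap_page, data at the payload pages, and mask and process_event() are illustrative stand-ins; a real consumer must also handle events that wrap past the end of the ring, which this sketch elides.

static void consume(struct perf_event_mmap_page *base, unsigned char *data,
		    u64 mask)
{
	u64 head = ring_buffer_read_head(base);	/* C: pairs with the kernel's B */
	u64 tail = base->data_tail;		/* only user space writes data_tail */

	while (tail != head) {
		struct perf_event_header *ev =
			(struct perf_event_header *)(data + (tail & mask));

		process_event(ev);
		tail += ev->size;
	}

	ring_buffer_write_tail(base, tail);	/* D: pairs with the kernel's A */
}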
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index 1738c0391da4..c934572d935c 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -8,8 +8,14 @@
#define spinlock_t pthread_mutex_t
#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
#define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
-#define spin_lock_init(x) pthread_mutex_init(x, NULL)
-
+#define spin_lock_init(x) pthread_mutex_init(x, NULL)
+
+#define spin_lock(x) pthread_mutex_lock(x)
+#define spin_unlock(x) pthread_mutex_unlock(x)
+#define spin_lock_bh(x) pthread_mutex_lock(x)
+#define spin_unlock_bh(x) pthread_mutex_unlock(x)
+#define spin_lock_irq(x) pthread_mutex_lock(x)
+#define spin_unlock_irq(x) pthread_mutex_unlock(x)
#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)
@@ -31,4 +37,6 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
return true;
}
+#include <linux/lockdep.h>
+
#endif
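With these wrappers, kernel code written against the spinlock API compiles in tools/ on top of a plain pthread mutex; the irqsave flavor simply evaluates and discards the flags argument. An illustrative use (not part of the commit):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);
static int counter;

static void bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);	/* pthread_mutex_lock() underneath */
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}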