/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Functions used by the KMSAN runtime.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#ifndef __MM_KMSAN_KMSAN_H
#define __MM_KMSAN_KMSAN_H

#include <asm/pgtable_64_types.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/printk.h>

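/*
 * Magic markers for synthetic origins: values created for compiler-
 * instrumented stack allocations and for links in an origin chain,
 * respectively.
 */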
#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200

#define KMSAN_POISON_NOCHECK 0x0
#define KMSAN_POISON_CHECK 0x1
#define KMSAN_POISON_FREE 0x2

#define KMSAN_ORIGIN_SIZE 4
#define KMSAN_MAX_ORIGIN_DEPTH 7

#define KMSAN_STACK_DEPTH 64

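/* Values for the @is_origin argument of kmsan_get_metadata(). */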
#define KMSAN_META_SHADOW (false)
#define KMSAN_META_ORIGIN (true)

extern bool kmsan_enabled;
extern int panic_on_kmsan;

/*
 * KMSAN performs a lot of consistency checks that are currently enabled by
 * default. BUG_ON is normally discouraged in the kernel, unless used for
 * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
 * recover if something goes wrong.
 */
#define KMSAN_WARN_ON(cond)                                           \
	({                                                            \
		const bool __cond = WARN_ON(cond);                    \
		if (unlikely(__cond)) {                               \
			WRITE_ONCE(kmsan_enabled, false);             \
			if (panic_on_kmsan) {                         \
				/* Can't call panic() here because */ \
				/* of uaccess checks. */              \
				BUG();                                \
			}                                             \
		}                                                     \
		__cond;                                               \
	})
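
/*
 * Like WARN_ON(), KMSAN_WARN_ON() evaluates to the tested condition, so
 * callers can use it to bail out of a runtime path. An illustrative
 * (hypothetical) example:
 *
 *	if (KMSAN_WARN_ON(!shadow))
 *		return;
 */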

/*
 * A pair of metadata pointers to be returned by the instrumentation functions.
 */
struct shadow_origin_ptr {
	void *shadow, *origin;
};

struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
						     bool store);
void *kmsan_get_metadata(void *addr, bool is_origin);
void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
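
/*
 * Illustrative sketch (loosely modeled on mm/kmsan/instrumentation.c): the
 * callbacks the compiler emits around 1-byte stores boil down to thin
 * wrappers that hand the instrumented code its pair of metadata pointers:
 *
 *	struct shadow_origin_ptr __msan_metadata_ptr_for_store_1(void *addr)
 *	{
 *		return kmsan_get_shadow_origin_ptr(addr, 1, true);
 *	}
 */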

enum kmsan_bug_reason {
	REASON_ANY,
	REASON_COPY_TO_USER,
	REASON_SUBMIT_URB,
};

void kmsan_print_origin(depot_stack_handle_t origin);

/**
 * kmsan_report() - Report a use of uninitialized value.
 * @origin:    Stack ID of the uninitialized value.
 * @address:   Address at which the memory access happens.
 * @size:      Memory access size.
 * @off_first: Offset (from @address) of the first byte to be reported.
 * @off_last:  Offset (from @address) of the last byte to be reported.
 * @user_addr: When non-NULL, denotes the userspace address to which the kernel
 *             is leaking data.
 * @reason:    Error type from enum kmsan_bug_reason.
 *
 * kmsan_report() prints an error message for a consecutive group of bytes
 * sharing the same origin. If an uninitialized value is used in a comparison,
 * this function is called once without specifying the addresses. When checking
 * a memory range, KMSAN may call kmsan_report() multiple times with the same
 * @address, @size, @user_addr and @reason, but different @off_first and
 * @off_last corresponding to different @origin values.
 */
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
		  int off_first, int off_last, const void *user_addr,
		  enum kmsan_bug_reason reason);
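
/*
 * For example (hypothetical values): if a 16-byte check starting at @address
 * finds that only bytes 4..7 are uninitialized and share one origin, KMSAN
 * would issue:
 *
 *	kmsan_report(origin, address, 16, 4, 7, user_addr, REASON_COPY_TO_USER);
 */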

DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);

static __always_inline struct kmsan_ctx *kmsan_get_context(void)
{
	return in_task() ? &current->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
}

/*
 * When a compiler hook or KMSAN runtime function is invoked, it may make a
 * call to instrumented code and eventually call itself recursively. To avoid
 * that, we guard the runtime entry regions with
 * kmsan_enter_runtime()/kmsan_leave_runtime() and exit the hook if
 * kmsan_in_runtime() is true.
 *
 * Non-runtime code may occasionally get executed in nested IRQs from the
 * runtime code (e.g. when called via smp_call_function_single()). Because some
 * KMSAN routines may take locks (e.g. for memory allocation), we conservatively
 * bail out instead of calling them. To minimize the effect of this (potentially
 * missing initialization events), kmsan_in_runtime() is not checked in
 * non-blocking runtime functions. A typical hook structure is sketched below,
 * after kmsan_leave_runtime().
 */
static __always_inline bool kmsan_in_runtime(void)
{
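	/*
	 * hardirq_count() is a multiple of HARDIRQ_OFFSET, so shifting it by
	 * HARDIRQ_SHIFT yields the hardirq nesting depth: a value greater
	 * than 1 means we are running in a nested hardirq and conservatively
	 * treat ourselves as being inside the runtime.
	 */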
	if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
		return true;
	if (in_nmi())
		return true;
	return kmsan_get_context()->kmsan_in_runtime;
}

static __always_inline void kmsan_enter_runtime(void)
{
	struct kmsan_ctx *ctx;

	ctx = kmsan_get_context();
	KMSAN_WARN_ON(ctx->kmsan_in_runtime++);
}

static __always_inline void kmsan_leave_runtime(void)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
}
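
/*
 * An illustrative (hypothetical) hook following the pattern described above:
 * bail out if KMSAN is disabled or the runtime is already active, otherwise
 * enter the runtime for the duration of the work.
 *
 *	void kmsan_example_hook(void *addr, size_t size)
 *	{
 *		if (!kmsan_enabled || kmsan_in_runtime())
 *			return;
 *		kmsan_enter_runtime();
 *		kmsan_internal_unpoison_memory(addr, size, false);
 *		kmsan_leave_runtime();
 *	}
 */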

depot_stack_handle_t kmsan_save_stack(void);
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
						 unsigned int extra_bits);

/*
 * Pack and unpack the origin chain depth and UAF flag to/from the extra bits
 * provided by the stack depot.
 * The UAF flag is stored in the lowest bit, followed by the depth in the upper
 * bits.
 * stack_depot_set_extra_bits() is responsible for clamping the value.
 */
static __always_inline unsigned int kmsan_extra_bits(unsigned int depth,
						     bool uaf)
{
	return (depth << 1) | uaf;
}

static __always_inline bool kmsan_uaf_from_eb(unsigned int extra_bits)
{
	return extra_bits & 1;
}

static __always_inline unsigned int kmsan_depth_from_eb(unsigned int extra_bits)
{
	return extra_bits >> 1;
}
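
/*
 * Worked example: kmsan_extra_bits(3, true) packs to (3 << 1) | 1 == 7,
 * from which kmsan_uaf_from_eb(7) recovers true and kmsan_depth_from_eb(7)
 * recovers 3.
 */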

/*
 * kmsan_internal_ functions are supposed to be very simple and not require the
 * kmsan_in_runtime() checks.
 */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
				  unsigned int poison_flags);
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked);
void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
				      u32 origin, bool checked);
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);

void kmsan_internal_task_create(struct task_struct *task);

bool kmsan_metadata_is_contiguous(void *addr, size_t size);
void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
				 int reason);

struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order);

/*
 * kmsan_internal_is_module_addr() and kmsan_internal_is_vmalloc_addr() are
 * non-instrumented versions of is_module_address() and is_vmalloc_addr() that
 * are safe to call from the KMSAN runtime without recursion.
 */
static inline bool kmsan_internal_is_module_addr(void *vaddr)
{
	return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
}

static inline bool kmsan_internal_is_vmalloc_addr(void *addr)
{
	return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
}
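
/*
 * Illustrative sketch (loosely based on mm/kmsan/shadow.c; vmalloc_meta()
 * stands in for the vmalloc-range metadata translation helper and is an
 * assumption here): metadata lookup first classifies the address, because
 * vmalloc and module mappings keep their shadow and origin at a fixed offset
 * rather than behind struct page:
 *
 *	if (kmsan_internal_is_vmalloc_addr(addr) ||
 *	    kmsan_internal_is_module_addr(addr))
 *		return vmalloc_meta(addr, is_origin);
 */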

#endif /* __MM_KMSAN_KMSAN_H */